mcp-use 1.3.3__py3-none-any.whl → 1.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcp-use might be problematic.
- mcp_use/adapters/base.py +2 -7
- mcp_use/agents/mcpagent.py +247 -148
- mcp_use/client.py +1 -3
- mcp_use/connectors/base.py +19 -18
- mcp_use/connectors/http.py +26 -1
- mcp_use/connectors/sandbox.py +7 -8
- mcp_use/managers/tools/search_tools.py +27 -5
- {mcp_use-1.3.3.dist-info → mcp_use-1.3.5.dist-info}/METADATA +29 -29
- {mcp_use-1.3.3.dist-info → mcp_use-1.3.5.dist-info}/RECORD +11 -11
- {mcp_use-1.3.3.dist-info → mcp_use-1.3.5.dist-info}/WHEEL +0 -0
- {mcp_use-1.3.3.dist-info → mcp_use-1.3.5.dist-info}/licenses/LICENSE +0 -0
mcp_use/adapters/base.py
CHANGED
@@ -33,8 +33,7 @@ class BaseAdapter(ABC):
         self.disallowed_tools = disallowed_tools or []
         self._connector_tool_map: dict[BaseConnector, list[T]] = {}
 
-    @classmethod
-    async def create_tools(cls, client: "MCPClient", disallowed_tools: list[str] | None = None) -> list[T]:
+    async def create_tools(self, client: "MCPClient") -> list[T]:
        """Create tools from an MCPClient instance.
 
        This is the recommended way to create tools from an MCPClient, as it handles
@@ -42,7 +41,6 @@ class BaseAdapter(ABC):
 
        Args:
            client: The MCPClient to extract tools from.
-            disallowed_tools: Optional list of tool names to exclude.
 
        Returns:
            A list of tools in the target framework's format.
@@ -56,9 +54,6 @@ class BaseAdapter(ABC):
            tools = await YourAdapter.create_tools(client)
            ```
        """
-        # Create the adapter
-        adapter = cls(disallowed_tools=disallowed_tools)
-
        # Ensure we have active sessions
        if not client.active_sessions:
            logger.info("No active sessions found, creating new ones...")
@@ -71,7 +66,7 @@ class BaseAdapter(ABC):
        connectors = [session.connector for session in sessions.values()]
 
        # Create tools from connectors
-        return await adapter._create_tools_from_connectors(connectors)
+        return await self._create_tools_from_connectors(connectors)
 
    async def load_tools_for_connector(self, connector: BaseConnector) -> list[T]:
        """Dynamically load tools for a specific connector.
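In 1.3.5, `create_tools` is an instance method rather than a classmethod factory, so tool filtering is configured on the adapter before tools are created. A minimal usage sketch, assuming `LangChainAdapter` is re-exported from `mcp_use.adapters` and the config path is illustrative:

```python
from mcp_use import MCPClient
from mcp_use.adapters import LangChainAdapter

async def build_tools() -> list:
    client = MCPClient.from_config_file("mcp_config.json")  # example path
    # 1.3.3 (removed): tools = await LangChainAdapter.create_tools(client, disallowed_tools=["shell"])
    adapter = LangChainAdapter(disallowed_tools=["shell"])  # filtering now set at construction
    return await adapter.create_tools(client)
```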
mcp_use/agents/mcpagent.py
CHANGED
@@ -7,7 +7,7 @@ to provide a simple interface for using MCP tools with different LLMs.
 
 import logging
 import time
-from collections.abc import AsyncIterator
+from collections.abc import AsyncGenerator, AsyncIterator
 
 from langchain.agents import AgentExecutor, create_tool_calling_agent
 from langchain.agents.output_parsers.tools import ToolAgentAction
@@ -106,9 +106,10 @@ class MCPAgent:
                raise ValueError("Client must be provided when using server manager")
            self.server_manager = ServerManager(self.client, self.adapter)
 
-        # State tracking
+        # State tracking - initialize _tools as empty list
        self._agent_executor: AgentExecutor | None = None
        self._system_message: SystemMessage | None = None
+        self._tools: list[BaseTool] = []
 
        # Track model info for telemetry
        self._model_provider, self._model_name = extract_model_info(self.llm)
@@ -304,150 +305,54 @@ class MCPAgent:
        """
        return self.disallowed_tools
 
-    async def _generate_response_chunks_async(
+    async def _consume_and_return(
        self,
-        query: str,
-        max_steps: int | None = None,
-        manage_connector: bool = True,
-        external_history: list[BaseMessage] | None = None,
-    ) -> AsyncIterator[StreamEvent]:
-        """Internal async generator yielding response chunks.
-
-        The implementation purposefully keeps the logic compact:
-        1. Ensure the agent is initialised (optionally handling connector
-           lifecycle).
-        2. Forward the *same* inputs we use for ``run`` to LangChain's
-           ``AgentExecutor.astream``.
-        3. Diff the growing ``output`` field coming from LangChain and yield
-           only the new part so the caller receives *incremental* chunks.
-        4. Persist conversation history when memory is enabled.
-        """
-
-        # 1. Initialise on-demand ------------------------------------------------
-        initialised_here = False
-        if (manage_connector and not self._initialized) or (not self._initialized and self.auto_initialize):
-            await self.initialize()
-            initialised_here = True
-
-        if not self._agent_executor:
-            raise RuntimeError("MCP agent failed to initialise – call initialise() first?")
-
-        # 2. Build inputs --------------------------------------------------------
-        effective_max_steps = max_steps or self.max_steps
-        self._agent_executor.max_iterations = effective_max_steps
-
-        if self.memory_enabled:
-            self.add_to_history(HumanMessage(content=query))
-
-        history_to_use = external_history if external_history is not None else self._conversation_history
-        inputs = {"input": query, "chat_history": history_to_use}
+        generator: AsyncGenerator[tuple[AgentAction, str], str],
+    ) -> str:
+        """Consume the generator and return the final result.
 
-        # 3. Stream & diff -------------------------------------------------------
-        async for event in self._agent_executor.astream_events(inputs):
-            if event.get("event") == "on_chain_end":
-                output = event["data"]["output"]
-                if isinstance(output, list):
-                    for message in output:
-                        if not isinstance(message, ToolAgentAction):
-                            self.add_to_history(message)
-            yield event
-        # 5. House-keeping -------------------------------------------------------
-        # Restrict agent cleanup in _generate_response_chunks_async to only occur
-        # when the agent was initialized in this generator and is not client-managed
-        # and the user does want us to manage the connection.
-        if not self.client and initialised_here and manage_connector:
-            logger.info("🧹 Closing agent after generator completion")
-            await self.close()
+        This method manually iterates through the generator to consume the steps.
+        In Python, async generators cannot return values directly, so we expect
+        the final result to be yielded as a special marker.
 
-    async def astream(
-        self,
-        query: str,
-        max_steps: int | None = None,
-        manage_connector: bool = True,
-        external_history: list[BaseMessage] | None = None,
-    ) -> AsyncIterator[str]:
-        """Asynchronous streaming interface.
-
-        Example::
+        Args:
+            generator: The async generator that yields steps and a final result.
 
-            async for chunk in agent.astream("hello"):
-                print(chunk, end="|", flush=True)
+        Returns:
+            The final result from the generator.
        """
-
-        start_time = time.time()
-        success = False
-        chunk_count = 0
-        total_response_length = 0
-
-        try:
-            async for chunk in self._generate_response_chunks_async(
-                query=query,
-                max_steps=max_steps,
-                manage_connector=manage_connector,
-                external_history=external_history,
-            ):
-                chunk_count += 1
-                total_response_length += len(chunk)
-                yield chunk
-            success = True
-        finally:
-            # Track comprehensive execution data for streaming
-            execution_time_ms = int((time.time() - start_time) * 1000)
-
-            server_count = 0
-            if self.client:
-                server_count = len(self.client.get_all_active_sessions())
-            elif self.connectors:
-                server_count = len(self.connectors)
-
-            conversation_history_length = len(self._conversation_history) if self.memory_enabled else 0
-
-            self.telemetry.track_agent_execution(
-                execution_method="astream",
-                query=query,
-                success=success,
-                model_provider=self._model_provider,
-                model_name=self._model_name,
-                server_count=server_count,
-                server_identifiers=[connector.public_identifier for connector in self.connectors],
-                total_tools_available=len(self._tools) if self._tools else 0,
-                tools_available_names=[tool.name for tool in self._tools],
-                max_steps_configured=self.max_steps,
-                memory_enabled=self.memory_enabled,
-                use_server_manager=self.use_server_manager,
-                max_steps_used=max_steps,
-                manage_connector=manage_connector,
-                external_history_used=external_history is not None,
-                response=f"[STREAMED RESPONSE - {total_response_length} chars]",
-                execution_time_ms=execution_time_ms,
-                error_type=None if success else "streaming_error",
-                conversation_history_length=conversation_history_length,
-            )
-
-    async def run(
+        final_result = ""
+        steps_taken = 0
+        tools_used_names = []
+        async for item in generator:
+            # If it's a string, it's the final result
+            if isinstance(item, str):
+                final_result = item
+                break
+            # Otherwise it's a step tuple, just consume it
+            steps_taken += 1
+            tools_used_names.append(item[0].tool)
+        return final_result, steps_taken, tools_used_names
+
+    async def stream(
        self,
        query: str,
        max_steps: int | None = None,
        manage_connector: bool = True,
        external_history: list[BaseMessage] | None = None,
-    ) -> str:
-        """Run a query using the MCP tools.
-
-        This method handles connecting to the MCP server, initializing the agent,
-        running the query, and then cleaning up the connection.
+        track_execution: bool = True,
+    ) -> AsyncGenerator[tuple[AgentAction, str] | str, None]:
+        """Run the agent and yield intermediate steps as an async generator.
 
        Args:
            query: The query to run.
            max_steps: Optional maximum number of steps to take.
            manage_connector: Whether to handle the connector lifecycle internally.
-                If True, this method will connect, initialize, and disconnect from
-                the connector automatically. If False, the caller is responsible
-                for managing the connector lifecycle.
            external_history: Optional external history to use instead of the
                internal conversation history.
 
-        Returns:
-            The result of running the query.
+        Yields:
+            Intermediate steps as (AgentAction, str) tuples, followed by the final result as a string.
        """
        result = ""
        initialized_here = False
@@ -546,11 +451,13 @@
                result = next_step_output.return_values.get("output", "No output generated")
                break
 
-            # If it's actions/steps, add to intermediate steps
+            # If it's actions/steps, add to intermediate steps and yield them
            intermediate_steps.extend(next_step_output)
 
-            # Track tool usage
-            for action, observation in next_step_output:
+            # Yield each step and track tool usage
+            for agent_step in next_step_output:
+                yield agent_step
+                action, observation = agent_step
                tool_name = action.tool
                tools_used_names.append(tool_name)
                tool_input_str = str(action.tool_input)
@@ -559,11 +466,11 @@
                    tool_input_str = tool_input_str[:97] + "..."
                logger.info(f"🔧 Tool call: {tool_name} with input: {tool_input_str}")
                # Truncate long outputs for readability
-
-                if len(
-
-
-                logger.info(f"📄 Tool result: {
+                observation_str = str(observation)
+                if len(observation_str) > 100:
+                    observation_str = observation_str[:97] + "..."
+                observation_str = observation_str.replace("\n", " ")
+                logger.info(f"📄 Tool result: {observation_str}")
 
                # Check for return_direct on the last action taken
                if len(next_step_output) > 0:
@@ -597,12 +504,14 @@
 
            logger.info(f"🎉 Agent execution complete in {time.time() - start_time} seconds")
            success = True
-
+
+            # Yield the final result as a string
+            yield result
 
        except Exception as e:
            logger.error(f"❌ Error running query: {e}")
            if initialized_here and manage_connector:
-                logger.info("🧹 Cleaning up resources after initialization error in run")
+                logger.info("🧹 Cleaning up resources after initialization error in stream")
                await self.close()
            raise
 
@@ -617,13 +526,84 @@
            server_count = len(self.connectors)
 
        conversation_history_length = len(self._conversation_history) if self.memory_enabled else 0
+
+        # Safely access _tools in case initialization failed
+        tools_available = getattr(self, "_tools", [])
+
+        if track_execution:
+            self.telemetry.track_agent_execution(
+                execution_method="stream",
+                query=query,
+                success=success,
+                model_provider=self._model_provider,
+                model_name=self._model_name,
+                server_count=server_count,
+                server_identifiers=[connector.public_identifier for connector in self.connectors],
+                total_tools_available=len(tools_available),
+                tools_available_names=[tool.name for tool in tools_available],
+                max_steps_configured=self.max_steps,
+                memory_enabled=self.memory_enabled,
+                use_server_manager=self.use_server_manager,
+                max_steps_used=max_steps,
+                manage_connector=manage_connector,
+                external_history_used=external_history is not None,
+                steps_taken=steps_taken,
+                tools_used_count=len(tools_used_names),
+                tools_used_names=tools_used_names,
+                response=result,
+                execution_time_ms=execution_time_ms,
+                error_type=None if success else "execution_error",
+                conversation_history_length=conversation_history_length,
+            )
+
+        # Clean up if necessary (e.g., if not using client-managed sessions)
+        if manage_connector and not self.client and initialized_here:
+            logger.info("🧹 Closing agent after stream completion")
+            await self.close()
+
+    async def run(
+        self,
+        query: str,
+        max_steps: int | None = None,
+        manage_connector: bool = True,
+        external_history: list[BaseMessage] | None = None,
+    ) -> str:
+        """Run a query using the MCP tools and return the final result.
+
+        This method uses the streaming implementation internally and returns
+        the final result after consuming all intermediate steps.
+
+        Args:
+            query: The query to run.
+            max_steps: Optional maximum number of steps to take.
+            manage_connector: Whether to handle the connector lifecycle internally.
+                If True, this method will connect, initialize, and disconnect from
+                the connector automatically. If False, the caller is responsible
+                for managing the connector lifecycle.
+            external_history: Optional external history to use instead of the
+                internal conversation history.
+
+        Returns:
+            The result of running the query.
+        """
+        success = True
+        start_time = time.time()
+        generator = self.stream(query, max_steps, manage_connector, external_history, track_execution=False)
+        try:
+            result, steps_taken, tools_used_names = await self._consume_and_return(generator)
+        except Exception as e:
+            success = False
+            error = str(e)
+            logger.error(f"❌ Error during agent execution: {e}")
+            raise
+        finally:
            self.telemetry.track_agent_execution(
                execution_method="run",
                query=query,
                success=success,
                model_provider=self._model_provider,
                model_name=self._model_name,
-                server_count=server_count,
+                server_count=len(self.client.get_all_active_sessions()) if self.client else len(self.connectors),
                server_identifiers=[connector.public_identifier for connector in self.connectors],
                total_tools_available=len(self._tools) if self._tools else 0,
                tools_available_names=[tool.name for tool in self._tools],
@@ -637,16 +617,132 @@
                tools_used_count=len(tools_used_names),
                tools_used_names=tools_used_names,
                response=result,
+                execution_time_ms=int((time.time() - start_time) * 1000),
+                error_type=error,
+                conversation_history_length=len(self._conversation_history),
+            )
+        return result
+
+    async def _generate_response_chunks_async(
+        self,
+        query: str,
+        max_steps: int | None = None,
+        manage_connector: bool = True,
+        external_history: list[BaseMessage] | None = None,
+    ) -> AsyncIterator[StreamEvent]:
+        """Internal async generator yielding response chunks.
+
+        The implementation purposefully keeps the logic compact:
+        1. Ensure the agent is initialised (optionally handling connector
+           lifecycle).
+        2. Forward the *same* inputs we use for ``run`` to LangChain's
+           ``AgentExecutor.astream``.
+        3. Diff the growing ``output`` field coming from LangChain and yield
+           only the new part so the caller receives *incremental* chunks.
+        4. Persist conversation history when memory is enabled.
+        """
+
+        # 1. Initialise on-demand ------------------------------------------------
+        initialised_here = False
+        if (manage_connector and not self._initialized) or (not self._initialized and self.auto_initialize):
+            await self.initialize()
+            initialised_here = True
+
+        if not self._agent_executor:
+            raise RuntimeError("MCP agent failed to initialise – call initialise() first?")
+
+        # 2. Build inputs --------------------------------------------------------
+        effective_max_steps = max_steps or self.max_steps
+        self._agent_executor.max_iterations = effective_max_steps
+
+        if self.memory_enabled:
+            self.add_to_history(HumanMessage(content=query))
+
+        history_to_use = external_history if external_history is not None else self._conversation_history
+        inputs = {"input": query, "chat_history": history_to_use}
+
+        # 3. Stream & diff -------------------------------------------------------
+        async for event in self._agent_executor.astream_events(inputs):
+            if event.get("event") == "on_chain_end":
+                output = event["data"]["output"]
+                if isinstance(output, list):
+                    for message in output:
+                        if not isinstance(message, ToolAgentAction):
+                            self.add_to_history(message)
+            yield event
+        # 5. House-keeping -------------------------------------------------------
+        # Restrict agent cleanup in _generate_response_chunks_async to only occur
+        # when the agent was initialized in this generator and is not client-managed
+        # and the user does want us to manage the connection.
+        if not self.client and initialised_here and manage_connector:
+            logger.info("🧹 Closing agent after generator completion")
+            await self.close()
+
+    async def stream_events(
+        self,
+        query: str,
+        max_steps: int | None = None,
+        manage_connector: bool = True,
+        external_history: list[BaseMessage] | None = None,
+    ) -> AsyncIterator[str]:
+        """Asynchronous streaming interface.
+
+        Example::
+
+            async for chunk in agent.astream("hello"):
+                print(chunk, end="|", flush=True)
+        """
+        start_time = time.time()
+        success = False
+        chunk_count = 0
+        total_response_length = 0
+
+        try:
+            async for chunk in self._generate_response_chunks_async(
+                query=query,
+                max_steps=max_steps,
+                manage_connector=manage_connector,
+                external_history=external_history,
+            ):
+                chunk_count += 1
+                if isinstance(chunk, str):
+                    total_response_length += len(chunk)
+                yield chunk
+            success = True
+        finally:
+            # Track comprehensive execution data for streaming
+            execution_time_ms = int((time.time() - start_time) * 1000)
+
+            server_count = 0
+            if self.client:
+                server_count = len(self.client.get_all_active_sessions())
+            elif self.connectors:
+                server_count = len(self.connectors)
+
+            conversation_history_length = len(self._conversation_history) if self.memory_enabled else 0
+
+            self.telemetry.track_agent_execution(
+                execution_method="stream_events",
+                query=query,
+                success=success,
+                model_provider=self._model_provider,
+                model_name=self._model_name,
+                server_count=server_count,
+                server_identifiers=[connector.public_identifier for connector in self.connectors],
+                total_tools_available=len(self._tools) if self._tools else 0,
+                tools_available_names=[tool.name for tool in self._tools],
+                max_steps_configured=self.max_steps,
+                memory_enabled=self.memory_enabled,
+                use_server_manager=self.use_server_manager,
+                max_steps_used=max_steps,
+                manage_connector=manage_connector,
+                external_history_used=external_history is not None,
+                response=f"[STREAMED RESPONSE - {total_response_length} chars]",
                execution_time_ms=execution_time_ms,
-                error_type=None if success else "execution_error",
+                error_type=None if success else "streaming_error",
                conversation_history_length=conversation_history_length,
            )
 
-        # Clean up if necessary (e.g., if not using client-managed sessions)
-        if manage_connector and not self.client and not initialized_here:
-            logger.info("🧹 Closing agent after query completion")
-            await self.close()
-
    async def close(self) -> None:
        """Close the MCP connection with improved error handling."""
        logger.info("🔌 Closing agent and cleaning up resources...")
@@ -659,7 +755,8 @@
        if self.client:
            logger.info("🔄 Closing sessions through client")
            await self.client.close_all_sessions()
-            self._sessions = {}
+            if hasattr(self, "_sessions"):
+                self._sessions = {}
        # If using direct connector, disconnect
        elif self.connectors:
            for connector in self.connectors:
@@ -677,6 +774,8 @@
            logger.error(f"❌ Error during agent closure: {e}")
            # Still try to clean up references even if there was an error
            self._agent_executor = None
-            self._tools = []
-
+            if hasattr(self, "_tools"):
+                self._tools = []
+            if hasattr(self, "_sessions"):
+                self._sessions = {}
        self._initialized = False
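The net effect of the refactor: `stream()` is now the single execution path, yielding an `(AgentAction, str)` tuple for each intermediate step and then the final answer as a plain string, while `run()` simply drains it via `_consume_and_return`. A consumer sketch based on the yield contract above:

```python
from mcp_use import MCPAgent

async def consume(agent: MCPAgent, query: str) -> str:
    """Drain agent.stream(), printing each step and returning the final answer."""
    final = ""
    async for item in agent.stream(query):
        if isinstance(item, str):        # the final result is yielded last
            final = item
            break
        action, observation = item       # an intermediate (AgentAction, str) step
        print(f"🔧 {action.tool}: {str(observation)[:80]}")
    return final
```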
mcp_use/client.py
CHANGED
@@ -189,9 +189,7 @@ class MCPClient:
 
        # Create sessions for all servers
        for name in servers:
-            session = await self.create_session(name)
-            if auto_initialize:
-                await session.initialize()
+            await self.create_session(name, auto_initialize)
 
        return self.sessions
 
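The loop now delegates to the existing `create_session` helper instead of open-coding session setup, keeping `create_all_sessions` and `create_session` in sync. A sketch of the equivalent per-server call, with a config dict of your own; that the second positional argument is named `auto_initialize` is an assumption:

```python
from mcp_use import MCPClient

async def open_sessions() -> None:
    config = {"mcpServers": {"echo": {"command": "python", "args": ["echo_server.py"]}}}  # example
    client = MCPClient.from_dict(config)
    # create_all_sessions() now effectively runs this for each configured server:
    await client.create_session("echo", True)  # True = auto-initialize the session
```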
mcp_use/connectors/base.py
CHANGED
@@ -11,6 +11,7 @@ from typing import Any
 from mcp import ClientSession
 from mcp.shared.exceptions import McpError
 from mcp.types import CallToolResult, GetPromptResult, Prompt, ReadResourceResult, Resource, Tool
+from pydantic import AnyUrl
 
 from ..logging import logger
 from ..task_managers import ConnectionManager
@@ -30,6 +31,7 @@ class BaseConnector(ABC):
        self._resources: list[Resource] | None = None
        self._prompts: list[Prompt] | None = None
        self._connected = False
+        self._initialized = False  # Track if client_session.initialize() has been called
        self.auto_reconnect = True  # Whether to automatically reconnect on connection loss (not configurable for now)
 
    @abstractmethod
@@ -86,6 +88,7 @@ class BaseConnector(ABC):
        self._tools = None
        self._resources = None
        self._prompts = None
+        self._initialized = False  # Reset initialization flag
 
        if errors:
            logger.warning(f"Encountered {len(errors)} errors during resource cleanup")
@@ -95,38 +98,40 @@
        if not self.client_session:
            raise RuntimeError("MCP client is not connected")
 
-
+        # Check if already initialized
+        if self._initialized:
+            return {"status": "already_initialized"}
 
        # Initialize the session
        result = await self.client_session.initialize()
+        self._initialized = True  # Mark as initialized
 
        server_capabilities = result.capabilities
 
        if server_capabilities.tools:
-            # Get available tools
-            tools_result = await self.list_tools()
-            self._tools = tools_result
+            # Get available tools directly from client session
+            tools_result = await self.client_session.list_tools()
+            self._tools = tools_result.tools if tools_result else []
        else:
            self._tools = []
 
        if server_capabilities.resources:
-            # Get available resources
-            resources_result = await self.list_resources()
-            self._resources = resources_result
+            # Get available resources directly from client session
+            resources_result = await self.client_session.list_resources()
+            self._resources = resources_result.resources if resources_result else []
        else:
            self._resources = []
 
        if server_capabilities.prompts:
-            # Get available prompts
-            prompts_result = await self.list_prompts()
-            self._prompts = prompts_result
+            # Get available prompts directly from client session
+            prompts_result = await self.client_session.list_prompts()
+            self._prompts = prompts_result.prompts if prompts_result else []
        else:
            self._prompts = []
 
        logger.debug(
            f"MCP session initialized with {len(self._tools)} tools, "
-            f"{len(self._resources)} resources, "
-            f"and {len(self._prompts)} prompts"
+            "{len(self._resources)} resources, and {len(self._prompts)} prompts"
        )
 
        return result
@@ -287,10 +292,9 @@
            logger.error(f"Error listing resources: {e}")
            return []
 
-    async def read_resource(self, uri: str) -> ReadResourceResult:
+    async def read_resource(self, uri: AnyUrl) -> ReadResourceResult:
        """Read a resource by URI."""
-        if not self.client_session:
-            raise RuntimeError("MCP client is not connected")
+        await self._ensure_connected()
 
        logger.debug(f"Reading resource: {uri}")
        result = await self.client_session.read_resource(uri)
@@ -298,7 +302,6 @@
 
    async def list_prompts(self) -> list[Prompt]:
        """List all available prompts from the MCP implementation."""
-        # Ensure we're connected
        await self._ensure_connected()
 
        logger.debug("Listing prompts")
@@ -311,7 +314,6 @@
 
    async def get_prompt(self, name: str, arguments: dict[str, Any] | None = None) -> GetPromptResult:
        """Get a prompt by name."""
-        # Ensure we're connected
        await self._ensure_connected()
 
        logger.debug(f"Getting prompt: {name}")
@@ -320,7 +322,6 @@
 
    async def request(self, method: str, params: dict[str, Any] | None = None) -> Any:
        """Send a raw request to the MCP implementation."""
-        # Ensure we're connected
        await self._ensure_connected()
 
        logger.debug(f"Sending request: {method} with params: {params}")
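The new `_initialized` flag makes `initialize()` idempotent, and it is reset on disconnect. The guard pattern in isolation, as a minimal sketch (a generic class, not the mcp-use one):

```python
class Handshake:
    """Idempotent initialize: the real handshake runs at most once per connection."""

    def __init__(self) -> None:
        self._initialized = False

    async def initialize(self) -> dict:
        if self._initialized:
            return {"status": "already_initialized"}  # repeated calls short-circuit
        # ... perform the real session handshake exactly once ...
        self._initialized = True
        return {"status": "initialized"}
```

Note also that `read_resource` now takes a pydantic `AnyUrl` instead of a plain string, so callers holding string URIs need to wrap them, e.g. `AnyUrl("file:///tmp/example.txt")`.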
mcp_use/connectors/http.py
CHANGED
@@ -81,12 +81,37 @@ class HttpConnector(BaseConnector):
 
        try:
            # Try to initialize - this is where streamable HTTP vs SSE difference should show up
-            await test_client.initialize()
+            result = await test_client.initialize()
 
            # If we get here, streamable HTTP works
 
            self.client_session = test_client
            self.transport_type = "streamable HTTP"
+            self._initialized = True  # Mark as initialized since we just called initialize()
+
+            # Populate tools, resources, and prompts since we've initialized
+            server_capabilities = result.capabilities
+
+            if server_capabilities.tools:
+                # Get available tools directly from client session
+                tools_result = await self.client_session.list_tools()
+                self._tools = tools_result.tools if tools_result else []
+            else:
+                self._tools = []
+
+            if server_capabilities.resources:
+                # Get available resources directly from client session
+                resources_result = await self.client_session.list_resources()
+                self._resources = resources_result.resources if resources_result else []
+            else:
+                self._resources = []
+
+            if server_capabilities.prompts:
+                # Get available prompts directly from client session
+                prompts_result = await self.client_session.list_prompts()
+                self._prompts = prompts_result.prompts if prompts_result else []
+            else:
+                self._prompts = []
 
        except Exception as init_error:
            # Clean up the test client
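Combined with the base-class flag, a successful streamable-HTTP probe now leaves the connector fully initialized, so a later `initialize()` call short-circuits instead of repeating the handshake. A sketch of the expected interaction, assuming `HttpConnector` takes the server URL as its first argument and with an illustrative URL:

```python
from mcp_use.connectors.http import HttpConnector

async def demo() -> None:
    connector = HttpConnector("http://localhost:8000/mcp")  # example URL
    await connector.connect()              # probes the transport, initializes, populates tools
    result = await connector.initialize()  # guard in base.py: no second handshake
    assert result == {"status": "already_initialized"}
```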
mcp_use/connectors/sandbox.py
CHANGED
@@ -65,9 +65,8 @@ class SandboxConnector(BaseConnector):
        super().__init__()
        if Sandbox is None:
            raise ImportError(
-                "E2B SDK (e2b-code-interpreter) not found. "
-                "Please install it with 'pip install mcp-use[e2b]' "
-                "(or 'pip install e2b-code-interpreter')."
+                "E2B SDK (e2b-code-interpreter) not found. Please install it with "
+                "'pip install mcp-use[e2b]' (or 'pip install e2b-code-interpreter')."
            )
 
        self.user_command = command
@@ -79,8 +78,8 @@
        self.api_key = _e2b_options.get("api_key") or os.environ.get("E2B_API_KEY")
        if not self.api_key:
            raise ValueError(
-                "E2B API key is required. Provide it via 'sandbox_options.api_key' "
-                "or the E2B_API_KEY environment variable."
+                "E2B API key is required. Provide it via 'sandbox_options.api_key'"
+                " or the E2B_API_KEY environment variable."
            )
 
        self.sandbox_template_id = _e2b_options.get("sandbox_template_id", "base")
@@ -88,7 +87,7 @@
 
        self.sandbox: Sandbox | None = None
        self.process: CommandHandle | None = None
-        self.
+        self.client_session: ClientSession | None = None
        self.errlog = sys.stderr
        self.base_url: str | None = None
        self._connected = False
@@ -218,8 +217,8 @@
        read_stream, write_stream = await self._connection_manager.start()
 
        # Create the client session
-        self.
-        await self.
+        self.client_session = ClientSession(read_stream, write_stream, sampling_callback=None)
+        await self.client_session.__aenter__()
 
        # Mark as connected
        self._connected = True
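Beyond the rewrapped messages, the session attribute is now `client_session`, matching `BaseConnector`. A sketch of supplying the E2B key programmatically, with the option keys taken from the error message above; the constructor keyword and server command are assumptions:

```python
from mcp_use.connectors.sandbox import SandboxConnector

connector = SandboxConnector(
    command="npx -y @modelcontextprotocol/server-everything",  # example MCP server command
    sandbox_options={"api_key": "e2b_...", "sandbox_template_id": "base"},  # keyword name assumed
)
```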
mcp_use/managers/tools/search_tools.py
CHANGED
@@ -1,8 +1,8 @@
 import asyncio
+import math
 import time
 from typing import ClassVar
 
-import numpy as np
 from langchain_core.tools import BaseTool
 from pydantic import BaseModel, Field
 
@@ -205,10 +205,8 @@ class ToolSearchEngine:
        # Calculate similarity scores
        scores = {}
        for tool_name, embedding in self.tool_embeddings.items():
-            # Calculate cosine similarity
-            similarity = np.dot(query_embedding, embedding) / (
-                np.linalg.norm(query_embedding) * np.linalg.norm(embedding)
-            )
+            # Calculate cosine similarity using pure Python
+            similarity = self._cosine_similarity(query_embedding, embedding)
            scores[tool_name] = float(similarity)
 
        # Sort by score and get top_k results
@@ -304,3 +302,27 @@ class ToolSearchEngine:
        formatted_output += "\nTo use a tool, connect to the appropriate server first, then invoke the tool."
 
        return formatted_output
+
+    def _cosine_similarity(self, vec1: list[float], vec2: list[float]) -> float:
+        """Calculate cosine similarity between two vectors.
+
+        Args:
+            vec1: First vector
+            vec2: Second vector
+
+        Returns:
+            Cosine similarity between the vectors
+        """
+        # Calculate dot product
+        dot_product = sum(a * b for a, b in zip(vec1, vec2, strict=False))
+
+        # Calculate magnitudes
+        magnitude1 = math.sqrt(sum(a * a for a in vec1))
+        magnitude2 = math.sqrt(sum(b * b for b in vec2))
+
+        # Avoid division by zero
+        if magnitude1 == 0 or magnitude2 == 0:
+            return 0.0
+
+        # Calculate cosine similarity
+        return dot_product / (magnitude1 * magnitude2)
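Dropping numpy trades vectorized math for a pure-Python loop; for single query-vs-embedding comparisons the result is identical up to floating-point noise. A standalone check mirroring `_cosine_similarity` above:

```python
import math

def cosine(v1: list[float], v2: list[float]) -> float:
    dot = sum(a * b for a, b in zip(v1, v2, strict=False))
    n1 = math.sqrt(sum(a * a for a in v1))
    n2 = math.sqrt(sum(b * b for b in v2))
    return 0.0 if n1 == 0 or n2 == 0 else dot / (n1 * n2)

assert abs(cosine([1.0, 2.0], [1.0, 2.0]) - 1.0) < 1e-9  # parallel vectors -> 1.0
assert abs(cosine([1.0, 0.0], [0.0, 1.0])) < 1e-9        # orthogonal vectors -> 0.0
```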
{mcp_use-1.3.3.dist-info → mcp_use-1.3.5.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-use
-Version: 1.3.3
+Version: 1.3.5
 Summary: MCP Library for LLMs
 Author-email: Pietro Zullo <pietro.zullo@gmail.com>
 License: MIT
@@ -15,20 +15,13 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.11
 Requires-Dist: aiohttp>=3.9.0
-Requires-Dist: anyio>=4.0.0
-Requires-Dist: authlib>=1.5.2
-Requires-Dist: httpx>=0.28.1
 Requires-Dist: jsonschema-pydantic>=0.1.0
-Requires-Dist: langchain-community>=0.0.10
 Requires-Dist: langchain>=0.1.0
 Requires-Dist: mcp>=1.9.3
 Requires-Dist: posthog>=4.8.0
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: python-dotenv>=1.0.0
 Requires-Dist: scarf-sdk>=0.1.0
-Requires-Dist: starlette>=0.41.0
-Requires-Dist: typing-extensions>=4.8.0
-Requires-Dist: uvicorn>=0.32.0
 Requires-Dist: websockets>=12.0
 Provides-Extra: anthropic
 Requires-Dist: langchain-anthropic; extra == 'anthropic'
@@ -50,6 +43,7 @@ Provides-Extra: search
 Requires-Dist: fastembed>=0.0.1; extra == 'search'
 Description-Content-Type: text/markdown
 
+<div align="center">
 <div align="center" style="margin: 0 auto; max-width: 80%;">
   <picture>
     <source media="(prefers-color-scheme: dark)" srcset="static/logo_white.svg">
@@ -58,24 +52,23 @@ Description-Content-Type: text/markdown
   </picture>
 </div>
 
-<h1 align="center">Connect any LLM to any MCP server</h1>
+<br>
+
+# Connect any LLM to any MCP server
+
 <p align="center">
+  <a href="https://github.com/pietrozullo/mcp-use/stargazers" alt="GitHub stars">
+    <img src="https://img.shields.io/github/stars/pietrozullo/mcp-use?style=social" /></a>
   <a href="https://pypi.org/project/mcp_use/" alt="PyPI Version">
     <img src="https://img.shields.io/pypi/v/mcp_use.svg"/></a>
-  <a href="https://pypi.org/project/mcp_use/" alt="PyPI Downloads">
-    <img src="https://static.pepy.tech/badge/mcp-use" /></a>
-  <a href="https://pypi.org/project/mcp_use/" alt="Python Versions">
-    <img src="https://img.shields.io/pypi/pyversions/mcp_use.svg" /></a>
-  <a href="https://docs.mcp-use.io" alt="Documentation">
-    <img src="https://img.shields.io/badge/docs-mcp--use.io-blue" /></a>
-  <a href="https://mcp-use.io" alt="Website">
-    <img src="https://img.shields.io/badge/website-mcp--use.io-blue" /></a>
   <a href="https://github.com/pietrozullo/mcp-use/blob/main/LICENSE" alt="License">
     <img src="https://img.shields.io/github/license/pietrozullo/mcp-use" /></a>
-  <a href="https://
-    <img src="https://
-  <a href="https://
-    <img src="https://img.shields.io/
+  <a href="https://pypi.org/project/mcp_use/" alt="PyPI Downloads">
+    <img src="https://static.pepy.tech/badge/mcp-use" /></a>
+  <a href="https://docs.mcp-use.com" alt="Documentation">
+    <img src="https://img.shields.io/badge/docs-mcp--use.com-blue" /></a>
+  <a href="https://mcp-use.com" alt="Website">
+    <img src="https://img.shields.io/badge/website-mcp--use.com-blue" /></a>
 </p>
 <p align="center">
   <a href="https://x.com/pietrozullo" alt="Twitter Follow - Pietro">
@@ -85,15 +78,19 @@ Description-Content-Type: text/markdown
   <a href="https://discord.gg/XkNkSkMz3V" alt="Discord">
     <img src="https://dcbadge.limes.pink/api/server/XkNkSkMz3V?style=flat" /></a>
 </p>
-
+</div>
 
-
+🌐 MCP-Use is the open source way to connect **any LLM to any MCP server** and build custom MCP agents that have tool access, without using closed source or application clients.
 
 💡 Let developers easily connect any LLM to tools like web browsing, file operations, and more.
 
+- Visit the [mcp-use.com website](https://mcp-use.com/) to know how to build and deploy MCP agents.
+- Visit the [mcp-use docs](https://docs.mcp-use.com/) to get started with mcp-use library
+
+💬 Get started quickly - chat with your servers on our <b>hosted version</b>! [Try mcp-use chat (beta)](https://chat.mcp-use.com).
+
 # Features
 
-## ✨ Key Features
 <table>
   <tr>
     <th width="400">Feature</th>
@@ -108,8 +105,8 @@
     <td>Works with any langchain supported LLM that supports tool calling (OpenAI, Anthropic, Groq, LLama etc.)</td>
   </tr>
   <tr>
-    <td>🌐 <a href="https://mcp-use.io/builder"><strong>Code Builder</strong></a></td>
-    <td>Explore MCP capabilities and generate starter code with the interactive <a href="https://mcp-use.io/builder">code builder</a>.</td>
+    <td>🌐 <a href="https://mcp-use.com/builder"><strong>Code Builder</strong></a></td>
+    <td>Explore MCP capabilities and generate starter code with the interactive <a href="https://mcp-use.com/builder">code builder</a>.</td>
   </tr>
   <tr>
     <td>🔗 <a href="#http-connection-example"><strong>HTTP Support</strong></a></td>
@@ -132,7 +129,7 @@
     <td>Build your own agents with any framework using the LangChain adapter or create new adapters</td>
   </tr>
   <tr>
-    <td>❓ <a href="https://mcp-use.io/what-should-we-build-next"><strong>What should we build next</strong></a></td>
+    <td>❓ <a href="https://mcp-use.com/what-should-we-build-next"><strong>What should we build next</strong></a></td>
     <td>Let us know what you'd like us to build next</td>
   </tr>
 </table>
@@ -164,6 +161,7 @@ pip install langchain-openai
 # For Anthropic
 pip install langchain-anthropic
 ```
+
 For other providers, check the [LangChain chat models documentation](https://python.langchain.com/docs/integrations/chat/) and add your API keys for the provider you want to use to your `.env` file.
 
 ```bash
@@ -719,7 +717,7 @@ async def main():
 
     # Create a custom LangChain agent
     llm_with_tools = llm.bind_tools(tools)
-    result = await llm_with_tools.ainvoke("What tools do you have
+    result = await llm_with_tools.ainvoke("What tools do you have available ? ")
    print(result)
 
@@ -800,7 +798,6 @@ Thanks to all our amazing contributors!
  <img src="https://contrib.rocks/image?repo=mcp-use/mcp-use" />
 </a>
 
-
 ## Top Starred Dependents
 
 <!-- gh-dependents-info-used-by-start -->
@@ -863,6 +860,7 @@
 # License
 
 MIT
+
 # Citation
 
 If you use MCP-Use in your research or project, please cite:
@@ -876,3 +874,5 @@
     url = {https://github.com/pietrozullo/mcp-use}
 }
 ```
+
+<img referrerpolicy="no-referrer-when-downgrade" src="https://static.scarf.sh/a.png?x-pxid=732589b6-6850-4b8c-aa25-906c0979e426&page=README.md" />
{mcp_use-1.3.3.dist-info → mcp_use-1.3.5.dist-info}/RECORD
CHANGED
@@ -1,21 +1,21 @@
 mcp_use/__init__.py,sha256=I3gFxw6Id45RksUBIZS1kxBW3ItjFXuAfoReJabpnW0,1055
-mcp_use/client.py,sha256=
+mcp_use/client.py,sha256=9u_GZxsyVoZY_g1dWGK8JiLmTatY6YvwGb4IM8idYZs,9391
 mcp_use/config.py,sha256=jRjTVNMxi7pkqFHMJhzSWpwukE4PbdYU8Pe_IZ33sYI,2433
 mcp_use/logging.py,sha256=CRtkPwR-bkXK_kQ0QOL86RikMWOHzEOi7A8VRHkNsZw,4270
 mcp_use/session.py,sha256=4kwcB_IkTt_3FiBSTI1H17KhL1W_6N5oai3HTxFrTH4,2496
 mcp_use/utils.py,sha256=QavJcVq2WxUUUCCpPCUeOB5bqIS0FFmpK-RAZkGc6aA,720
 mcp_use/adapters/__init__.py,sha256=-xCrgPThuX7x0PHGFDdjb7M-mgw6QV3sKu5PM7ShnRg,275
-mcp_use/adapters/base.py,sha256=
+mcp_use/adapters/base.py,sha256=U1z_UzojC-bytb4ZuKTRpEgEp-2F_BVBgqEXbUqLYB4,6901
 mcp_use/adapters/langchain_adapter.py,sha256=LdlpRyLORhl8NZvtAmisgPelXkhEbBErSNdGHb8SF18,10860
 mcp_use/agents/__init__.py,sha256=N3eVYP2PxqNO2KcQv5fY8UMUX2W3eLTNkkzuFIJ1DUA,261
 mcp_use/agents/base.py,sha256=EN-dRbwOi9vIqofFg3jmi5yT2VKlwEr9Cwi1DZgB3eE,1591
-mcp_use/agents/mcpagent.py,sha256=
+mcp_use/agents/mcpagent.py,sha256=ke912HbpkXxcv1DpjiFTEg3hoIEMSFlTS62IRxYaTDs,35656
 mcp_use/agents/prompts/system_prompt_builder.py,sha256=E86STmxcl2Ic763_114awNqFB2RyLrQlbvgRmJajQjI,4116
 mcp_use/agents/prompts/templates.py,sha256=AZKrGWuI516C-PmyOPvxDBibNdqJtN24sOHTGR06bi4,1933
 mcp_use/connectors/__init__.py,sha256=cUF4yT0bNr8qeLkSzg28SHueiV5qDaHEB1l1GZ2K0dc,536
-mcp_use/connectors/base.py,sha256=
-mcp_use/connectors/http.py,sha256=
-mcp_use/connectors/sandbox.py,sha256=
+mcp_use/connectors/base.py,sha256=GnI2WCrcxjOaB0R6Hj9pTpAZ0I1YRwkz1gGt-tvuZa0,12609
+mcp_use/connectors/http.py,sha256=oZbLLVDNwUaY6EwK2rVnjoZEKsBLf5-AXtPvGtbMmwc,7559
+mcp_use/connectors/sandbox.py,sha256=cnybcNW55k-S0hUtRR1M3KcGXwnaeDMVm8wDTsfF1Mk,10875
 mcp_use/connectors/stdio.py,sha256=rnJoLaHf1cIjk1KqfxfSsUs-iGTJ7KZonxgIc3kXeCM,2791
 mcp_use/connectors/utils.py,sha256=zQ8GdNQx0Twz3by90BoU1RsWPf9wODGof4K3-NxPXeA,366
 mcp_use/connectors/websocket.py,sha256=G7ZeLJNPVl9AG6kCmiNJz1N2Ing_QxT7pSswigTKi8Y,9650
@@ -27,7 +27,7 @@ mcp_use/managers/tools/connect_server.py,sha256=MGYQCl11q-w6gSIYuT44dDk7ILV3Oh7k
 mcp_use/managers/tools/disconnect_server.py,sha256=Y3kJN31efzsjfJwxUhpBxS-bgU21DCfGbn_LgEbzyvI,1586
 mcp_use/managers/tools/get_active_server.py,sha256=tCaib76gYU3L5G82tEOTq4Io2cuCXWjOjPselb-92i8,964
 mcp_use/managers/tools/list_servers_tool.py,sha256=OPDSMNe-VuAhlUyhDnR4CiuZFpoMhnhWpAablwO5S0k,1897
-mcp_use/managers/tools/search_tools.py,sha256=
+mcp_use/managers/tools/search_tools.py,sha256=4vso7ln-AfG6lQAMq9FA_CyeVtSEDYEWlHtdHtfnLps,12911
 mcp_use/managers/tools/use_tool.py,sha256=gMNjgJrI9XDitPyJglcJcAvowbEWkO5z57yt4DT2Lpc,6626
 mcp_use/observability/__init__.py,sha256=kTUcP0d6L5_3ktfldhdAk-3AWckzVHs7ztG-R6cye64,186
 mcp_use/observability/laminar.py,sha256=WWjmVXP55yCfAlqlayeuJmym1gdrv8is7UyrIp4Tbn0,839
@@ -43,7 +43,7 @@ mcp_use/telemetry/events.py,sha256=K5xqbmkum30r4gM2PWtTiUWGF8oZzGZw2DYwco1RfOQ,3
 mcp_use/telemetry/telemetry.py,sha256=ck2MDFMtooafriR1W_zi41dWq-0O-ucF89pCkdkyc9E,11724
 mcp_use/telemetry/utils.py,sha256=kDVTqt2oSeWNJbnTOlXOehr2yFO0PMyx2UGkrWkfJiw,1769
 mcp_use/types/sandbox.py,sha256=opJ9r56F1FvaqVvPovfAj5jZbsOexgwYx5wLgSlN8_U,712
-mcp_use-1.3.3.dist-info/METADATA,sha256=
-mcp_use-1.3.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-mcp_use-1.3.3.dist-info/licenses/LICENSE,sha256=7Pw7dbwJSBw8zH-WE03JnR5uXvitRtaGTP9QWPcexcs,1068
-mcp_use-1.3.3.dist-info/RECORD,,
+mcp_use-1.3.5.dist-info/METADATA,sha256=dMpM6hZi0cuGyKdbTVB5_YuUJhcZSIeBNn_riJSkbZ0,28534
+mcp_use-1.3.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mcp_use-1.3.5.dist-info/licenses/LICENSE,sha256=7Pw7dbwJSBw8zH-WE03JnR5uXvitRtaGTP9QWPcexcs,1068
+mcp_use-1.3.5.dist-info/RECORD,,

{mcp_use-1.3.3.dist-info → mcp_use-1.3.5.dist-info}/WHEEL
File without changes

{mcp_use-1.3.3.dist-info → mcp_use-1.3.5.dist-info}/licenses/LICENSE
File without changes