mcp-use 1.3.10__py3-none-any.whl → 1.3.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcp-use might be problematic.
- mcp_use/adapters/langchain_adapter.py +9 -52
- mcp_use/agents/mcpagent.py +88 -37
- mcp_use/agents/prompts/templates.py +1 -10
- mcp_use/agents/remote.py +154 -128
- mcp_use/auth/__init__.py +6 -0
- mcp_use/auth/bearer.py +17 -0
- mcp_use/auth/oauth.py +625 -0
- mcp_use/auth/oauth_callback.py +214 -0
- mcp_use/client.py +25 -1
- mcp_use/config.py +7 -2
- mcp_use/connectors/base.py +25 -12
- mcp_use/connectors/http.py +135 -27
- mcp_use/connectors/sandbox.py +12 -3
- mcp_use/connectors/stdio.py +11 -3
- mcp_use/connectors/websocket.py +15 -6
- mcp_use/exceptions.py +31 -0
- mcp_use/middleware/__init__.py +50 -0
- mcp_use/middleware/logging.py +31 -0
- mcp_use/middleware/metrics.py +314 -0
- mcp_use/middleware/middleware.py +262 -0
- mcp_use/task_managers/base.py +13 -23
- mcp_use/task_managers/sse.py +5 -0
- mcp_use/task_managers/streamable_http.py +5 -0
- {mcp_use-1.3.10.dist-info → mcp_use-1.3.12.dist-info}/METADATA +21 -25
- {mcp_use-1.3.10.dist-info → mcp_use-1.3.12.dist-info}/RECORD +28 -19
- {mcp_use-1.3.10.dist-info → mcp_use-1.3.12.dist-info}/WHEEL +0 -0
- {mcp_use-1.3.10.dist-info → mcp_use-1.3.12.dist-info}/entry_points.txt +0 -0
- {mcp_use-1.3.10.dist-info → mcp_use-1.3.12.dist-info}/licenses/LICENSE +0 -0
mcp_use/adapters/langchain_adapter.py
CHANGED
@@ -8,15 +8,12 @@ import re
 from typing import Any, NoReturn
 
 from jsonschema_pydantic import jsonschema_to_pydantic
-from langchain_core.tools import BaseTool
+from langchain_core.tools import BaseTool
 from mcp.types import (
     CallToolResult,
-    EmbeddedResource,
-    ImageContent,
     Prompt,
     ReadResourceRequestParams,
     Resource,
-    TextContent,
 )
 from pydantic import BaseModel, Field, create_model
 
@@ -39,7 +36,7 @@ class LangChainAdapter(BaseAdapter):
         self._connector_tool_map: dict[BaseConnector, list[BaseTool]] = {}
 
     def fix_schema(self, schema: dict) -> dict:
-        """Convert JSON Schema 'type': ['string', 'null'] to 'anyOf' format.
+        """Convert JSON Schema 'type': ['string', 'null'] to 'anyOf' format and fix enum handling.
 
         Args:
             schema: The JSON schema to fix.
@@ -51,54 +48,15 @@ class LangChainAdapter(BaseAdapter):
         if "type" in schema and isinstance(schema["type"], list):
             schema["anyOf"] = [{"type": t} for t in schema["type"]]
             del schema["type"]  # Remove 'type' and standardize to 'anyOf'
+
+        # Fix enum handling - ensure enum fields are properly typed as strings
+        if "enum" in schema and "type" not in schema:
+            schema["type"] = "string"
+
         for key, value in schema.items():
             schema[key] = self.fix_schema(value)  # Apply recursively
         return schema
 
-    def _parse_mcp_tool_result(self, tool_result: CallToolResult) -> str:
-        """Parse the content of a CallToolResult into a string.
-
-        Args:
-            tool_result: The result object from calling an MCP tool.
-
-        Returns:
-            A string representation of the tool result content.
-
-        Raises:
-            ToolException: If the tool execution failed, returned no content,
-                or contained unexpected content types.
-        """
-        if tool_result.isError:
-            raise ToolException(f"Tool execution failed: {tool_result.content}")
-
-        if not tool_result.content:
-            raise ToolException("Tool execution returned no content")
-
-        decoded_result = ""
-        for item in tool_result.content:
-            match item.type:
-                case "text":
-                    item: TextContent
-                    decoded_result += item.text
-                case "image":
-                    item: ImageContent
-                    decoded_result += item.data  # Assuming data is string-like or base64
-                case "resource":
-                    resource: EmbeddedResource = item.resource
-                    if hasattr(resource, "text"):
-                        decoded_result += resource.text
-                    elif hasattr(resource, "blob"):
-                        # Assuming blob needs decoding or specific handling; adjust as needed
-                        decoded_result += (
-                            resource.blob.decode() if isinstance(resource.blob, bytes) else str(resource.blob)
-                        )
-                    else:
-                        raise ToolException(f"Unexpected resource type: {resource.type}")
-                case _:
-                    raise ToolException(f"Unexpected content type: {item.type}")
-
-        return decoded_result
-
     def _convert_tool(self, mcp_tool: dict[str, Any], connector: BaseConnector) -> BaseTool:
         """Convert an MCP tool to LangChain's tool format.
 
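
The new enum fallback is the behavioral piece of this hunk: schemas that declare `enum` values without a `type` now get `"type": "string"` before the Pydantic conversion. A minimal standalone sketch of that normalization (the function name and sample schema below are illustrative, not part of the package):

```python
# Standalone sketch of the normalization fix_schema now performs (illustrative only;
# the in-package version is a method on LangChainAdapter and may differ in detail).
def normalize_schema(schema):
    if isinstance(schema, list):
        return [normalize_schema(item) for item in schema]
    if not isinstance(schema, dict):
        return schema

    # 'type': ['string', 'null'] -> 'anyOf': [{'type': 'string'}, {'type': 'null'}]
    if "type" in schema and isinstance(schema["type"], list):
        schema["anyOf"] = [{"type": t} for t in schema["type"]]
        del schema["type"]

    # New in 1.3.12: bare enums get an explicit string type.
    if "enum" in schema and "type" not in schema:
        schema["type"] = "string"

    return {key: normalize_schema(value) for key, value in schema.items()}


sample = {
    "type": "object",
    "properties": {
        "mode": {"enum": ["fast", "thorough"]},   # enum without a type
        "limit": {"type": ["integer", "null"]},   # union type list
    },
}
print(normalize_schema(sample))
# {'type': 'object', 'properties': {'mode': {'enum': ['fast', 'thorough'], 'type': 'string'},
#  'limit': {'anyOf': [{'type': 'integer'}, {'type': 'null'}]}}
```
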
@@ -138,7 +96,7 @@ class LangChainAdapter(BaseAdapter):
                 """
                 raise NotImplementedError("MCP tools only support async operations")
 
-            async def _arun(self, **kwargs: Any) ->
+            async def _arun(self, **kwargs: Any) -> str | dict:
                 """Asynchronously execute the tool with given arguments.
 
                 Args:
@@ -155,8 +113,7 @@ class LangChainAdapter(BaseAdapter):
                 try:
                     tool_result: CallToolResult = await self.tool_connector.call_tool(self.name, kwargs)
                     try:
-
-                        return adapter_self._parse_mcp_tool_result(tool_result)
+                        return str(tool_result.content)
                     except Exception as e:
                         # Log the exception for debugging
                         logger.error(f"Error parsing tool result: {e}")
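
The other half of this file's change is visible here: instead of decoding text, image, and resource blocks individually (the removed `_parse_mcp_tool_result`), the tool now returns `str(tool_result.content)`. A rough illustration of the difference, assuming the `mcp` SDK's `CallToolResult` and `TextContent` models:

```python
# Rough illustration of the behavioral change above, assuming the mcp Python SDK's
# CallToolResult / TextContent models; not part of mcp-use itself.
from mcp.types import CallToolResult, TextContent

result = CallToolResult(
    content=[TextContent(type="text", text="42 files found")],
    isError=False,
)

# 1.3.12 behaviour: the whole content list is stringified as-is (repr-style output).
print(str(result.content))

# 1.3.10 behaviour (roughly): text blocks were decoded individually, yielding just the text.
print("".join(item.text for item in result.content if item.type == "text"))
```
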
mcp_use/agents/mcpagent.py
CHANGED
@@ -213,6 +213,42 @@ class MCPAgent:
         self._initialized = True
         logger.info("✨ Agent initialization complete")
 
+    def _normalize_output(self, value: object) -> str:
+        """Normalize model outputs into a plain text string."""
+        try:
+            if isinstance(value, str):
+                return value
+
+            # LangChain messages may have .content which is str or list-like
+            content = getattr(value, "content", None)
+            if content is not None:
+                return self._normalize_output(content)
+
+            if isinstance(value, list):
+                parts: list[str] = []
+                for item in value:
+                    if isinstance(item, dict):
+                        if "text" in item and isinstance(item["text"], str):
+                            parts.append(item["text"])
+                        elif "content" in item:
+                            parts.append(self._normalize_output(item["content"]))
+                        else:
+                            # Fallback to str for unknown shapes
+                            parts.append(str(item))
+                    else:
+                        # recurse on .content or str
+                        part_content = getattr(item, "text", None)
+                        if isinstance(part_content, str):
+                            parts.append(part_content)
+                        else:
+                            parts.append(self._normalize_output(getattr(item, "content", item)))
+                return "".join(parts)
+
+            return str(value)
+
+        except Exception:
+            return str(value)
+
     async def _create_system_message_from_tools(self, tools: list[BaseTool]) -> None:
         """Create the system message based on provided tools using the builder."""
         # Use the override if provided, otherwise use the imported default
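
The helper is pure data-shaping, so it is easy to exercise outside the agent. A standalone re-statement (error handling omitted, names illustrative) shows how the common LangChain output shapes collapse to plain text:

```python
# Standalone re-statement of the _normalize_output logic above, for illustration only
# (the try/except fallback from the real method is omitted here).
def normalize_output(value):
    if isinstance(value, str):
        return value
    content = getattr(value, "content", None)   # e.g. AIMessage.content
    if content is not None:
        return normalize_output(content)
    if isinstance(value, list):
        parts = []
        for item in value:
            if isinstance(item, dict):
                if isinstance(item.get("text"), str):
                    parts.append(item["text"])                      # {"type": "text", "text": ...} blocks
                elif "content" in item:
                    parts.append(normalize_output(item["content"]))
                else:
                    parts.append(str(item))
            else:
                text = getattr(item, "text", None)
                parts.append(text if isinstance(text, str) else normalize_output(getattr(item, "content", item)))
        return "".join(parts)
    return str(value)


print(normalize_output("plain string"))
print(normalize_output([{"type": "text", "text": "Hello, "}, {"type": "text", "text": "world"}]))
# -> "plain string" and "Hello, world"
```
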
@@ -232,9 +268,12 @@ class MCPAgent:
         )
 
         # Update conversation history if memory is enabled
+        # Note: The system message should not be included in the conversation history,
+        # as it will be automatically added using the create_tool_calling_agent function with the prompt parameter
         if self.memory_enabled:
-
-
+            self._conversation_history = [
+                msg for msg in self._conversation_history if not isinstance(msg, SystemMessage)
+            ]
 
     def _create_agent(self) -> AgentExecutor:
         """Create the LangChain agent with the configured system message.
@@ -248,14 +287,25 @@ class MCPAgent:
         if self._system_message:
             system_content = self._system_message.content
 
-
-
-
-
-
-
-
-
+        if self.memory_enabled:
+            # Query already in chat_history — don't re-inject it
+            prompt = ChatPromptTemplate.from_messages(
+                [
+                    ("system", system_content),
+                    MessagesPlaceholder(variable_name="chat_history"),
+                    ("human", "{input}"),
+                    MessagesPlaceholder(variable_name="agent_scratchpad"),
+                ]
+            )
+        else:
+            # No memory — inject input directly
+            prompt = ChatPromptTemplate.from_messages(
+                [
+                    ("system", system_content),
+                    ("human", "{input}"),
+                    MessagesPlaceholder(variable_name="agent_scratchpad"),
+                ]
+            )
 
         tool_names = [tool.name for tool in self._tools]
         logger.info(f"🧠 Agent ready with tools: {', '.join(tool_names)}")
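
Both prompt layouts can be reproduced directly with `langchain-core`; a minimal sketch (the system text and history contents below are made up) shows what the memory-enabled variant renders to. The memoryless variant simply drops the `chat_history` placeholder.

```python
# Minimal sketch of the memory-enabled prompt layout built above
# (sample system text and history are made up).
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

system_content = "You are a helpful assistant with access to MCP tools."

prompt_with_memory = ChatPromptTemplate.from_messages(
    [
        ("system", system_content),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)

messages = prompt_with_memory.format_messages(
    chat_history=[HumanMessage(content="hi"), AIMessage(content="hello!")],
    input="What tools do you have?",
    agent_scratchpad=[],   # filled in by the agent executor at run time
)
for message in messages:
    print(type(message).__name__, "->", message.content)
# SystemMessage, then the two history messages, then the current human input
```
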
@@ -286,10 +336,6 @@ class MCPAgent:
         """Clear the conversation history."""
         self._conversation_history = []
 
-        # Re-add the system message if it exists
-        if self._system_message and self.memory_enabled:
-            self._conversation_history = [self._system_message]
-
     def add_to_history(self, message: BaseMessage) -> None:
         """Add a message to the conversation history.
 
@@ -315,15 +361,6 @@ class MCPAgent:
         """
         self._system_message = SystemMessage(content=message)
 
-        # Update conversation history if memory is enabled
-        if self.memory_enabled:
-            # Remove old system message if it exists
-            history_without_system = [msg for msg in self._conversation_history if not isinstance(msg, SystemMessage)]
-            self._conversation_history = history_without_system
-
-            # Add new system message
-            self._conversation_history.insert(0, self._system_message)
-
         # Recreate the agent with the new system message if initialized
         if self._initialized and self._tools:
             self._agent_executor = self._create_agent()
@@ -467,10 +504,6 @@ class MCPAgent:
         display_query = query[:50].replace("\n", " ") + "..." if len(query) > 50 else query.replace("\n", " ")
         logger.info(f"💬 Received query: '{display_query}'")
 
-        # Add the user query to conversation history if memory is enabled
-        if self.memory_enabled:
-            self.add_to_history(HumanMessage(content=query))
-
         # Use the provided history or the internal history
         history_to_use = external_history if external_history is not None else self._conversation_history
 
@@ -492,6 +525,10 @@ class MCPAgent:
 
         logger.info(f"🏁 Starting agent execution with max_steps={steps}")
 
+        # Track whether agent finished successfully vs reached max iterations
+        agent_finished_successfully = False
+        result = None
+
         # Create a run manager with our callbacks if we have any - ONCE for the entire execution
         run_manager = None
         if self.callbacks:
@@ -578,7 +615,9 @@ class MCPAgent:
                 # Process the output
                 if isinstance(next_step_output, AgentFinish):
                     logger.info(f"✅ Agent finished at step {step_num + 1}")
-
+                    agent_finished_successfully = True
+                    output_value = next_step_output.return_values.get("output", "No output generated")
+                    result = self._normalize_output(output_value)
                     # End the chain if we have a run manager
                     if run_manager:
                         await run_manager.on_chain_end({"output": result})
@@ -659,7 +698,9 @@ class MCPAgent:
                 tool_return = self._agent_executor._get_tool_return(last_step)
                 if tool_return is not None:
                     logger.info(f"🏆 Tool returned directly at step {step_num + 1}")
+                    agent_finished_successfully = True
                     result = tool_return.return_values.get("output", "No output generated")
+                    result = self._normalize_output(result)
                     break
 
             except OutputParserException as e:
@@ -681,10 +722,16 @@ class MCPAgent:
 
             # --- Loop finished ---
             if not result:
-
-
-
-
+                if agent_finished_successfully:
+                    # Agent finished successfully but returned empty output
+                    result = "Agent completed the task successfully."
+                    logger.info("✅ Agent finished successfully with empty output")
+                else:
+                    # Agent actually reached max iterations
+                    logger.warning(f"⚠️ Agent stopped after reaching max iterations ({steps})")
+                    result = f"Agent stopped after reaching the maximum number of steps ({steps})."
+                if run_manager:
+                    await run_manager.on_chain_end({"output": result})
 
             # If structured output was requested but not achieved, attempt one final time
             if output_schema and structured_llm and not success:
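
Together with the `agent_finished_successfully` flag introduced before the loop, this fallback distinguishes "finished but produced empty output" from "hit the step limit", which previously both fell through the bare `if not result:` branch. A condensed, standalone sketch of that control flow (illustrative names and inputs):

```python
# Condensed, standalone sketch of the finish-vs-max-steps fallback above (illustrative only).
def run_steps(max_steps, step_outputs):
    """step_outputs: list of (finished, output) pairs simulating agent steps."""
    result = None
    agent_finished_successfully = False
    for finished, output in step_outputs[:max_steps]:
        if finished:
            agent_finished_successfully = True
            result = output
            break
    if not result:
        if agent_finished_successfully:
            result = "Agent completed the task successfully."       # finished, but empty output
        else:
            result = f"Agent stopped after reaching the maximum number of steps ({max_steps})."
    return result


print(run_steps(3, [(False, None), (True, "")]))      # finished with empty output
print(run_steps(2, [(False, None), (False, None)]))   # ran out of steps
```
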
@@ -707,8 +754,11 @@ class MCPAgent:
                     logger.error(f"❌ Final structured output attempt failed: {e}")
                     raise RuntimeError(f"Failed to generate structured output after {steps} steps: {str(e)}") from e
 
+            if self.memory_enabled:
+                self.add_to_history(HumanMessage(content=query))
+
             if self.memory_enabled and not output_schema:
-                self.add_to_history(AIMessage(content=result))
+                self.add_to_history(AIMessage(content=self._normalize_output(result)))
 
             logger.info(f"🎉 Agent execution complete in {time.time() - start_time} seconds")
             if not success:
@@ -861,7 +911,7 @@ class MCPAgent:
                 steps_taken=steps_taken,
                 tools_used_count=len(self.tools_used_names),
                 tools_used_names=self.tools_used_names,
-                response=str(result),
+                response=str(self._normalize_output(result)),
                 execution_time_ms=int((time.time() - start_time) * 1000),
                 error_type=error,
                 conversation_history_length=len(self._conversation_history),
@@ -964,9 +1014,6 @@ class MCPAgent:
         effective_max_steps = max_steps or self.max_steps
         self._agent_executor.max_iterations = effective_max_steps
 
-        if self.memory_enabled:
-            self.add_to_history(HumanMessage(content=query))
-
         history_to_use = external_history if external_history is not None else self._conversation_history
         inputs = {"input": query, "chat_history": history_to_use}
 
@@ -979,6 +1026,10 @@ class MCPAgent:
                 if not isinstance(message, ToolAgentAction):
                     self.add_to_history(message)
             yield event
+
+        if self.memory_enabled:
+            self.add_to_history(HumanMessage(content=query))
+
         # 5. House-keeping -------------------------------------------------------
         # Restrict agent cleanup in _generate_response_chunks_async to only occur
         # when the agent was initialized in this generator and is not client-managed
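
With the streaming path now also appending to history only after the run (mirroring the change in `run()`), the current query is no longer part of `chat_history` while the agent executes; it reaches the model through the prompt's `{input}` slot instead. A small sketch of the resulting ordering (standalone, with made-up contents):

```python
# Small sketch of the history ordering that results from the changes above
# (standalone; message contents are made up).
from langchain_core.messages import AIMessage, HumanMessage

conversation_history = [
    HumanMessage(content="earlier question"),
    AIMessage(content="earlier answer"),
]
query = "current question"

# During the run, chat_history holds only previous turns; the current query
# travels through the prompt's {input} slot instead.
inputs = {"input": query, "chat_history": list(conversation_history)}

# Only after the run completes is the new turn appended.
result = "current answer"
conversation_history.append(HumanMessage(content=query))
conversation_history.append(AIMessage(content=result))

for message in conversation_history:
    print(type(message).__name__, "->", message.content)
```
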
mcp_use/agents/prompts/templates.py
CHANGED
@@ -5,16 +5,7 @@ You have access to the following tools:
 
 {tool_descriptions}
 
-Use
-
-Question: the input question you must answer
-Thought: you should always think about what to do
-Action: the action to take, should be one of the available tools
-Action Input: the input to the action
-Observation: the result of the action
-... (this Thought/Action/Action Input/Observation can repeat N times)
-Thought: I now know the final answer
-Final Answer: the final answer to the original input question"""
+Use these tools to help answer questions and complete tasks as needed."""
 
 
 SERVER_MANAGER_SYSTEM_PROMPT_TEMPLATE = """You are a helpful assistant designed
|