mcp-use 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcp-use might be problematic.
- mcp_use/__init__.py +30 -0
- mcp_use/agents/__init__.py +12 -0
- mcp_use/agents/base.py +63 -0
- mcp_use/agents/langchain_agent.py +241 -0
- mcp_use/agents/mcpagent.py +149 -0
- mcp_use/client.py +226 -0
- mcp_use/config.py +113 -0
- mcp_use/connectors/__init__.py +13 -0
- mcp_use/connectors/base.py +61 -0
- mcp_use/connectors/http.py +126 -0
- mcp_use/connectors/stdio.py +124 -0
- mcp_use/connectors/websocket.py +142 -0
- mcp_use/logging.py +96 -0
- mcp_use/session.py +168 -0
- mcp_use/tools/__init__.py +11 -0
- mcp_use/tools/converter.py +108 -0
- mcp_use/tools/formats.py +181 -0
- mcp_use/types.py +33 -0
- mcp_use-0.1.0.dist-info/METADATA +287 -0
- mcp_use-0.1.0.dist-info/RECORD +23 -0
- mcp_use-0.1.0.dist-info/WHEEL +5 -0
- mcp_use-0.1.0.dist-info/licenses/LICENSE +21 -0
- mcp_use-0.1.0.dist-info/top_level.txt +1 -0
mcp_use/__init__.py
ADDED
@@ -0,0 +1,30 @@
"""
mcp_use - A model-agnostic MCP (Multi-Channel Platform) library for LLMs.

This library provides a unified interface for connecting different LLMs
to MCP tools through existing LangChain adapters.
"""

from .agents.mcpagent import MCPAgent
from .client import MCPClient
from .config import create_session_from_config, load_config_file
from .connectors import BaseConnector, HttpConnector, StdioConnector, WebSocketConnector
from .logging import logger
from .session import MCPSession
from .tools.converter import ModelProvider, ToolConverter

__version__ = "0.1.0"
__all__ = [
    "MCPAgent",
    "MCPClient",
    "MCPSession",
    "BaseConnector",
    "StdioConnector",
    "WebSocketConnector",
    "HttpConnector",
    "ModelProvider",
    "ToolConverter",
    "create_session_from_config",
    "load_config_file",
    "logger",
]
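To see how these exports fit together, here is a minimal usage sketch. It is illustrative only and is not part of the published files: the StdioConnector arguments, the server command, and the choice of ChatOpenAI as the LangChain model are assumptions, since those definitions are not expanded in this diff.

import asyncio

from langchain_openai import ChatOpenAI  # any LangChain chat model; assumed here

from mcp_use import MCPAgent, StdioConnector


async def main() -> None:
    # StdioConnector construction is assumed; its actual signature lives in
    # mcp_use/connectors/stdio.py, which is not expanded in this diff.
    connector = StdioConnector(command="npx", args=["my-mcp-server"])
    agent = MCPAgent(llm=ChatOpenAI(model="gpt-4o"), connector=connector, max_steps=5)
    result = await agent.run("What tools do you have available?")
    print(result)


asyncio.run(main())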
mcp_use/agents/__init__.py
ADDED
@@ -0,0 +1,12 @@
"""
Agent implementations for using MCP tools.

This module provides ready-to-use agent implementations
that are pre-configured for using MCP tools.
"""

from .base import BaseAgent
from .langchain_agent import LangChainAgent
from .mcpagent import MCPAgent

__all__ = ["BaseAgent", "LangChainAgent", "MCPAgent"]
mcp_use/agents/base.py
ADDED
@@ -0,0 +1,63 @@
"""
Base agent interface for MCP tools.

This module provides a base class for agents that use MCP tools.
"""

from abc import ABC, abstractmethod
from typing import Any

from ..session import MCPSession


class BaseAgent(ABC):
    """Base class for agents that use MCP tools.

    This abstract class defines the interface for agents that use MCP tools.
    Agents are responsible for integrating LLMs with MCP tools.
    """

    def __init__(self, session: MCPSession):
        """Initialize a new agent.

        Args:
            session: The MCP session to use for tool calls.
        """
        self.session = session

    @abstractmethod
    async def initialize(self) -> None:
        """Initialize the agent.

        This method should prepare the agent for use, including initializing
        the MCP session and setting up any necessary components.
        """
        pass

    @abstractmethod
    async def run(self, query: str, max_steps: int = 10) -> dict[str, Any]:
        """Run the agent with a query.

        Args:
            query: The query to run.
            max_steps: The maximum number of steps to run.

        Returns:
            The final result from the agent.
        """
        pass

    @abstractmethod
    async def step(
        self, query: str, previous_steps: list[dict[str, Any]] | None = None
    ) -> dict[str, Any]:
        """Perform a single step of the agent.

        Args:
            query: The query to run.
            previous_steps: Optional list of previous steps.

        Returns:
            The result of the step.
        """
        pass
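To make the contract above concrete, here is a hedged sketch of a minimal subclass. EchoAgent is invented for illustration and is not in the package; the session.initialize() call assumes an MCPSession method defined in mcp_use/session.py, which is not expanded in this diff.

from typing import Any

from mcp_use.agents.base import BaseAgent


class EchoAgent(BaseAgent):
    """Toy agent that satisfies the abstract interface without calling an LLM."""

    async def initialize(self) -> None:
        # A real agent would prepare the MCP session here, e.g. discover tools.
        await self.session.initialize()  # assumed MCPSession method; not shown in this diff

    async def run(self, query: str, max_steps: int = 10) -> dict[str, Any]:
        # Run a single step and wrap it as the final result.
        step_result = await self.step(query)
        return {"output": step_result["output"], "steps": [step_result]}

    async def step(
        self, query: str, previous_steps: list[dict[str, Any]] | None = None
    ) -> dict[str, Any]:
        # Echo the query; a real implementation would pick and call an MCP tool.
        return {"output": f"echo: {query}"}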
mcp_use/agents/langchain_agent.py
ADDED
@@ -0,0 +1,241 @@
"""
LangChain agent implementation for MCP tools.

This module provides a LangChain agent implementation that can use MCP tools
through a unified interface.
"""

from typing import Any, NoReturn

from jsonschema_pydantic import jsonschema_to_pydantic
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.schema.language_model import BaseLanguageModel
from langchain_core.tools import BaseTool, ToolException
from mcp.types import CallToolResult, EmbeddedResource, ImageContent, TextContent
from pydantic import BaseModel

from ..connectors.base import BaseConnector
from ..logging import logger


def _parse_mcp_tool_result(tool_result: CallToolResult) -> str:
    """Parse the content of a CallToolResult into a string.

    Args:
        tool_result: The result object from calling an MCP tool.

    Returns:
        A string representation of the tool result content.

    Raises:
        ToolException: If the tool execution failed, returned no content,
            or contained unexpected content types.
    """
    if tool_result.isError:
        raise ToolException(f"Tool execution failed: {tool_result.content}")

    if not tool_result.content:
        raise ToolException("Tool execution returned no content")

    decoded_result = ""
    for item in tool_result.content:
        match item.type:
            case "text":
                item: TextContent
                decoded_result += item.text
            case "image":
                item: ImageContent
                decoded_result += item.data  # Assuming data is string-like or base64
            case "resource":
                resource: EmbeddedResource = item.resource
                if hasattr(resource, "text"):
                    decoded_result += resource.text
                elif hasattr(resource, "blob"):
                    # Assuming blob needs decoding or specific handling; adjust as needed
                    decoded_result += (
                        resource.blob.decode()
                        if isinstance(resource.blob, bytes)
                        else str(resource.blob)
                    )
                else:
                    raise ToolException(f"Unexpected resource type: {resource.type}")
            case _:
                raise ToolException(f"Unexpected content type: {item.type}")

    return decoded_result


class LangChainAgent:
    """LangChain agent that can use MCP tools.

    This agent uses LangChain's agent framework to interact with MCP tools
    through a unified interface.
    """

    def __init__(
        self, connector: BaseConnector, llm: BaseLanguageModel, max_steps: int = 5
    ) -> None:
        """Initialize a new LangChain agent.

        Args:
            connector: The MCP connector to use.
            llm: The LangChain LLM to use.
            max_steps: The maximum number of steps to take.
        """
        self.connector = connector
        self.llm = llm
        self.max_steps = max_steps
        self.tools: list[BaseTool] = []
        self.agent: AgentExecutor | None = None

    async def initialize(self) -> None:
        """Initialize the agent and its tools."""
        self.tools = await self._create_langchain_tools()
        self.agent = self._create_agent()

    def fix_schema(self, schema: dict) -> dict:
        """Convert JSON Schema 'type': ['string', 'null'] to 'anyOf' format.

        Args:
            schema: The JSON schema to fix.

        Returns:
            The fixed JSON schema.
        """
        if isinstance(schema, dict):
            if "type" in schema and isinstance(schema["type"], list):
                schema["anyOf"] = [{"type": t} for t in schema["type"]]
                del schema["type"]  # Remove 'type' and standardize to 'anyOf'
            for key, value in schema.items():
                schema[key] = self.fix_schema(value)  # Apply recursively
        return schema

    async def _create_langchain_tools(self) -> list[BaseTool]:
        """Create LangChain tools from MCP tools.

        Returns:
            A list of LangChain tools created from MCP tools.
        """
        tools = self.connector.tools
        local_connector = self.connector

        # Wrap MCP tools into LangChain tools
        langchain_tools: list[BaseTool] = []
        for tool in tools:
            # Define adapter class to convert MCP tool to LangChain format
            class McpToLangChainAdapter(BaseTool):
                name: str = tool.name or "NO NAME"
                description: str = tool.description or ""
                # Convert JSON schema to Pydantic model for argument validation
                args_schema: type[BaseModel] = jsonschema_to_pydantic(
                    self.fix_schema(tool.inputSchema)  # Apply schema conversion
                )
                connector: BaseConnector = local_connector
                handle_tool_error: bool = True

                def _run(self, **kwargs: Any) -> NoReturn:
                    """Synchronous run method that always raises an error.

                    Raises:
                        NotImplementedError: Always raises this error because MCP tools
                            only support async operations.
                    """
                    raise NotImplementedError("MCP tools only support async operations")

                async def _arun(self, **kwargs: Any) -> Any:
                    """Asynchronously execute the tool with given arguments.

                    Args:
                        kwargs: The arguments to pass to the tool.

                    Returns:
                        The result of the tool execution.

                    Raises:
                        ToolException: If tool execution fails.
                    """
                    logger.info(f'MCP tool: "{self.name}" received input: {kwargs}')

                    try:
                        tool_result: CallToolResult = await self.connector.call_tool(
                            self.name, kwargs
                        )
                        try:
                            # Use the helper function to parse the result
                            return _parse_mcp_tool_result(tool_result)
                        except Exception as e:
                            # Log the exception for debugging
                            logger.error(f"Error parsing tool result: {e}")
                            # Shortened line:
                            return (
                                f"Error parsing result: {e!s}; Raw content: {tool_result.content!r}"
                            )

                    except Exception as e:
                        if self.handle_tool_error:
                            return f"Error executing MCP tool: {str(e)}"
                        raise

            langchain_tools.append(McpToLangChainAdapter())

        # Log available tools for debugging
        logger.info(f"Available tools: {[tool.name for tool in langchain_tools]}")
        return langchain_tools

    def _create_agent(self) -> AgentExecutor:
        """Create the LangChain agent.

        Returns:
            An initialized AgentExecutor.
        """
        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant that can use tools to help users.",
                ),
                MessagesPlaceholder(variable_name="chat_history"),
                ("human", "{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )
        agent = create_tool_calling_agent(llm=self.llm, tools=self.tools, prompt=prompt)
        print(self.tools)
        return AgentExecutor(
            agent=agent, tools=self.tools, max_iterations=self.max_steps, verbose=True
        )

    async def run(
        self,
        query: str,
        max_steps: int | None = None,
        chat_history: list | None = None,
    ) -> str:
        """Run the agent on a query.

        Args:
            query: The query to run.
            max_steps: Optional maximum number of steps to take.
            chat_history: Optional chat history.

        Returns:
            The result of running the query.

        Raises:
            RuntimeError: If the MCP client is not initialized.
        """
        if not self.agent:
            raise RuntimeError("MCP client is not initialized")

        if max_steps is not None:
            self.agent.max_iterations = max_steps

        # Initialize empty chat history if none provided
        if chat_history is None:
            chat_history = []

        # Invoke with all required variables
        result = await self.agent.ainvoke({"input": query, "chat_history": chat_history})

        return result["output"]
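The fix_schema conversion above is easiest to see on a concrete input. The schema below is a made-up example; the before/after shapes follow directly from the method's logic.

# A made-up tool schema with a nullable property, as an MCP server might emit it.
schema_in = {
    "type": "object",
    "properties": {
        "url": {"type": ["string", "null"], "description": "Page to open"},
    },
}

# LangChainAgent.fix_schema(schema_in) rewrites only the nested property, because
# its "type" is a list; the outer {"type": "object"} is left untouched:
#
# {
#     "type": "object",
#     "properties": {
#         "url": {
#             "description": "Page to open",
#             "anyOf": [{"type": "string"}, {"type": "null"}],
#         },
#     },
# }
#
# jsonschema_to_pydantic can then build the args_schema used by the adapter tools.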
mcp_use/agents/mcpagent.py
ADDED
@@ -0,0 +1,149 @@
"""
Model-Agnostic MCP: Main integration module.

This module provides the main MCPAgent class that integrates all components
to provide a simple interface for using MCP tools with different LLMs.
"""

from langchain.schema.language_model import BaseLanguageModel

from mcp_use.client import MCPClient
from mcp_use.connectors.base import BaseConnector
from mcp_use.session import MCPSession

from ..logging import logger
from .langchain_agent import LangChainAgent


class MCPAgent:
    """Main class for using MCP tools with various LLM providers.

    This class provides a unified interface for using MCP tools with different LLM providers
    through LangChain's agent framework.
    """

    def __init__(
        self,
        llm: BaseLanguageModel,
        client: MCPClient | None = None,
        connector: BaseConnector | None = None,
        server_name: str | None = None,
        max_steps: int = 5,
        auto_initialize: bool = False,
    ):
        """Initialize a new MCPAgent instance.

        Args:
            llm: The LangChain LLM to use.
            client: The MCPClient to use. If provided, connector is ignored.
            connector: The MCP connector to use if client is not provided.
            server_name: The name of the server to use if client is provided.
            max_steps: The maximum number of steps to take.
            auto_initialize: Whether to automatically initialize the agent when run is called.
        """
        self.llm = llm
        self.client = client
        self.connector = connector
        self.server_name = server_name
        self.max_steps = max_steps
        self.auto_initialize = auto_initialize
        self._initialized = False

        # Either client or connector must be provided
        if not client and not connector:
            raise ValueError("Either client or connector must be provided")

        self._agent: LangChainAgent | None = None
        self._session: MCPSession | None = None

    async def initialize(self) -> None:
        """Initialize the MCP client and agent."""
        # If using client, get or create a session
        if self.client:
            try:
                self._session = self.client.get_session(self.server_name)
            except ValueError:
                self._session = await self.client.create_session(self.server_name)
            connector_to_use = self._session.connector
        else:
            # Using direct connector
            connector_to_use = self.connector
            await connector_to_use.connect()
            await connector_to_use.initialize()

        # Create the agent
        self._agent = LangChainAgent(
            connector=connector_to_use, llm=self.llm, max_steps=self.max_steps
        )

        # Initialize the agent
        await self._agent.initialize()
        self._initialized = True

    async def close(self) -> None:
        """Close the MCP connection."""
        try:
            if self._agent:
                # Clean up the agent first
                self._agent = None

            # If using client with session, close the session through client
            if self.client and self._session:
                await self.client.close_session(self.server_name)
            # If using direct connector, disconnect
            elif self.connector:
                await self.connector.disconnect()

            self._initialized = False
        except Exception as e:
            logger.warning(f"Warning: Error during agent closure: {e}")
            # Still try to clean up even if there was an error
            self._agent = None
            self._initialized = False

    async def run(
        self, query: str, max_steps: int | None = None, manage_connector: bool = True
    ) -> str:
        """Run a query using the MCP tools.

        This method handles connecting to the MCP server, initializing the agent,
        running the query, and then cleaning up the connection.

        Args:
            query: The query to run.
            max_steps: Optional maximum number of steps to take.
            manage_connector: Whether to handle the connector lifecycle internally.
                If True, this method will connect, initialize, and disconnect from
                the connector automatically. If False, the caller is responsible
                for managing the connector lifecycle.

        Returns:
            The result of running the query.
        """
        result = ""
        try:
            if manage_connector:
                # Initialize if needed
                if not self._initialized or not self._agent:
                    await self.initialize()

                # Run the query
                if not self._agent:
                    raise RuntimeError("MCP client failed to initialize")

                result = await self._agent.run(query, max_steps)
            else:
                # Caller is managing connector lifecycle
                if not self._initialized and self.auto_initialize:
                    await self.initialize()

                if not self._agent:
                    raise RuntimeError("MCP client is not initialized")

                result = await self._agent.run(query, max_steps)

            return result
        finally:
            # Make sure to clean up the connection if we're managing it
            if manage_connector and not self.client:
                await self.close()
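run() either manages the connector lifecycle itself (as in the quick-start sketch after mcp_use/__init__.py) or leaves it to the caller via manage_connector=False. Below is a hedged sketch of the caller-managed mode, reusing one connection across several queries; as before, the connector construction and the LLM choice are assumptions, not taken from this diff.

import asyncio

from langchain_openai import ChatOpenAI  # assumed LLM choice

from mcp_use import MCPAgent, StdioConnector


async def main() -> None:
    # Placeholder server command; StdioConnector's signature is assumed, not shown here.
    connector = StdioConnector(command="npx", args=["my-mcp-server"])
    agent = MCPAgent(llm=ChatOpenAI(model="gpt-4o"), connector=connector, max_steps=10)

    await agent.initialize()  # connect and build the LangChain agent once
    try:
        # Reuse the same connection for several queries.
        print(await agent.run("Open example.com", manage_connector=False))
        print(await agent.run("What is the page title?", manage_connector=False))
    finally:
        await agent.close()  # disconnect the connector


asyncio.run(main())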