alita-sdk 0.3.423__py3-none-any.whl → 0.3.449__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/runtime/clients/client.py +45 -9
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/langchain/assistant.py +10 -2
- alita_sdk/runtime/langchain/constants.py +1 -1
- alita_sdk/runtime/langchain/langraph_agent.py +4 -1
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/toolkits/__init__.py +24 -0
- alita_sdk/runtime/toolkits/mcp.py +892 -0
- alita_sdk/runtime/toolkits/tools.py +61 -3
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +166 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
- alita_sdk/runtime/utils/mcp_oauth.py +164 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +347 -0
- alita_sdk/runtime/utils/streamlit.py +34 -3
- alita_sdk/runtime/utils/toolkit_utils.py +14 -4
- alita_sdk/tools/__init__.py +5 -0
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/gitlab/api_wrapper.py +5 -0
- alita_sdk/tools/qtest/api_wrapper.py +240 -39
- {alita_sdk-0.3.423.dist-info → alita_sdk-0.3.449.dist-info}/METADATA +2 -1
- {alita_sdk-0.3.423.dist-info → alita_sdk-0.3.449.dist-info}/RECORD +26 -18
- {alita_sdk-0.3.423.dist-info → alita_sdk-0.3.449.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.423.dist-info → alita_sdk-0.3.449.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.423.dist-info → alita_sdk-0.3.449.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/toolkits/tools.py

@@ -12,12 +12,14 @@ from .datasource import DatasourcesToolkit
 from .prompt import PromptToolkit
 from .subgraph import SubgraphToolkit
 from .vectorstore import VectorStoreToolkit
+from .mcp import McpToolkit
 from ..tools.mcp_server_tool import McpServerTool
 from ..tools.sandbox import SandboxToolkit
 from ..tools.image_generation import ImageGenerationToolkit
 # Import community tools
 from ...community import get_toolkits as community_toolkits, get_tools as community_tools
 from ...tools.memory import MemoryToolkit
+from ..utils.mcp_oauth import canonical_resource, McpAuthorizationRequired
 from ...tools.utils import TOOLKIT_SPLITTER
 
 logger = logging.getLogger(__name__)
@@ -29,13 +31,14 @@ def get_toolkits():
         MemoryToolkit.toolkit_config_schema(),
         VectorStoreToolkit.toolkit_config_schema(),
         SandboxToolkit.toolkit_config_schema(),
-        ImageGenerationToolkit.toolkit_config_schema()
+        ImageGenerationToolkit.toolkit_config_schema(),
+        McpToolkit.toolkit_config_schema()
     ]
 
     return core_toolkits + community_toolkits() + alita_toolkits()
 
 
-def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None, debug_mode: Optional[bool] = False) -> list:
+def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None, debug_mode: Optional[bool] = False, mcp_tokens: Optional[dict] = None) -> list:
     prompts = []
     tools = []
 
@@ -106,7 +109,52 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
                     llm=llm,
                     toolkit_name=tool.get('toolkit_name', ''),
                     **tool['settings']).get_tools())
+            elif tool['type'] == 'mcp':
+                settings = dict(tool['settings'])
+                url = settings.get('url')
+                headers = settings.get('headers')
+                token_data = None
+                session_id = None
+                if mcp_tokens and url:
+                    canonical_url = canonical_resource(url)
+                    logger.info(f"[MCP Auth] Looking for token for URL: {url}")
+                    logger.info(f"[MCP Auth] Canonical URL: {canonical_url}")
+                    logger.info(f"[MCP Auth] Available tokens: {list(mcp_tokens.keys())}")
+                    token_data = mcp_tokens.get(canonical_url)
+                    if token_data:
+                        logger.info(f"[MCP Auth] Found token data for {canonical_url}")
+                        # Handle both old format (string) and new format (dict with access_token and session_id)
+                        if isinstance(token_data, dict):
+                            access_token = token_data.get('access_token')
+                            session_id = token_data.get('session_id')
+                            logger.info(f"[MCP Auth] Token data: access_token={'present' if access_token else 'missing'}, session_id={session_id or 'none'}")
+                        else:
+                            # Backward compatibility: treat as plain token string
+                            access_token = token_data
+                            logger.info(f"[MCP Auth] Using legacy token format (string)")
+                    else:
+                        access_token = None
+                        logger.warning(f"[MCP Auth] No token found for {canonical_url}")
+                else:
+                    access_token = None
+
+                if access_token:
+                    merged_headers = dict(headers) if headers else {}
+                    merged_headers.setdefault('Authorization', f'Bearer {access_token}')
+                    settings['headers'] = merged_headers
+                    logger.info(f"[MCP Auth] Added Authorization header for {url}")
+
+                # Pass session_id to MCP toolkit if available
+                if session_id:
+                    settings['session_id'] = session_id
+                    logger.info(f"[MCP Auth] Passing session_id to toolkit: {session_id}")
+                tools.extend(McpToolkit.get_toolkit(
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    client=alita_client,
+                    **settings).get_tools())
         except Exception as e:
+            if isinstance(e, McpAuthorizationRequired):
+                raise
             logger.error(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}", exc_info=True)
             if debug_mode:
                 logger.info("Skipping tool initialization error due to debug mode.")
@@ -121,7 +169,8 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
     tools += community_tools(tools_list, alita_client, llm)
     # Add alita tools
     tools += alita_tools(tools_list, alita_client, llm, memory_store)
-    # Add MCP tools
+    # Add MCP tools registered via alita-mcp CLI (static registry)
+    # Note: Tools with type='mcp' are already handled in main loop above
     tools += _mcp_tools(tools_list, alita_client)
 
     # Sanitize tool names to meet OpenAI's function naming requirements
@@ -176,6 +225,10 @@ def _sanitize_tool_names(tools: list) -> list:
 
 
 def _mcp_tools(tools_list, alita):
+    """
+    Handle MCP tools registered via alita-mcp CLI (static registry).
+    Skips tools with type='mcp' as those are handled by dynamic discovery.
+    """
     try:
         all_available_toolkits = alita.get_mcp_toolkits()
         toolkit_lookup = {tk["name"]: tk for tk in all_available_toolkits}
@@ -183,6 +236,11 @@ def _mcp_tools(tools_list, alita):
         #
         for selected_toolkit in tools_list:
             server_toolkit_name = selected_toolkit['type']
+
+            # Skip tools with type='mcp' - they're handled by dynamic discovery
+            if server_toolkit_name == 'mcp':
+                continue
+
             toolkit_conf = toolkit_lookup.get(server_toolkit_name)
             #
             if not toolkit_conf:
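For orientation before the new files below: a minimal sketch of the inputs the new `elif tool['type'] == 'mcp'` branch in get_tools() consumes. The dictionary shapes are inferred from the keys read in this diff (url, headers, access_token, session_id); the toolkit name, URLs, and token values are invented.

    # Hypothetical inputs for the new 'mcp' branch in get_tools(); names and values are examples only.
    from alita_sdk.runtime.utils.mcp_oauth import canonical_resource

    tool_entry = {
        "type": "mcp",
        "toolkit_name": "remote_mcp",              # example name
        "settings": {
            "url": "https://mcp.example.com/sse",  # example URL
            "headers": {"X-Tenant": "demo"},
        },
    }

    mcp_tokens = {
        # Keys are canonical_resource(url); the canonical form is defined in
        # alita_sdk/runtime/utils/mcp_oauth.py.
        canonical_resource("https://mcp.example.com/sse"): {
            "access_token": "eyJhbGciOi...",       # placeholder token
            "session_id": "123e4567-e89b-42d3-a456-426614174000",
        },
        # Legacy format: a plain token string (no session_id) is also accepted.
        canonical_resource("https://legacy.example.com/"): "eyJhbGciOi...",
    }

    # With a matching entry, get_tools() merges {"Authorization": "Bearer <token>"}
    # into settings["headers"], copies session_id into settings["session_id"], and
    # then builds the toolkit via McpToolkit.get_toolkit(**settings).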
alita_sdk/runtime/tools/mcp_inspect_tool.py (new file)

@@ -0,0 +1,284 @@
+"""
+MCP Server Inspection Tool.
+Allows inspecting available tools, prompts, and resources on an MCP server.
+"""
+
+import asyncio
+import json
+import logging
+import time
+from typing import Any, Type, Dict, List, Optional
+
+from langchain_core.tools import BaseTool
+from pydantic import BaseModel, Field, ConfigDict
+import aiohttp
+
+logger = logging.getLogger(__name__)
+
+
+class McpInspectInput(BaseModel):
+    """Input schema for MCP server inspection tool."""
+
+    resource_type: str = Field(
+        default="all",
+        description="What to inspect: 'tools', 'prompts', 'resources', or 'all'"
+    )
+
+
+class McpInspectTool(BaseTool):
+    """Tool for inspecting available tools, prompts, and resources on an MCP server."""
+
+    name: str = "mcp_inspect"
+    description: str = "List available tools, prompts, and resources from the MCP server"
+    args_schema: Type[BaseModel] = McpInspectInput
+    return_type: str = "str"
+
+    # MCP server connection details
+    server_name: str = Field(..., description="Name of the MCP server")
+    server_url: str = Field(..., description="URL of the MCP server")
+    server_headers: Optional[Dict[str, str]] = Field(default=None, description="HTTP headers for authentication")
+    timeout: int = Field(default=30, description="Request timeout in seconds")
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    def __getstate__(self):
+        """Custom serialization for pickle compatibility."""
+        state = self.__dict__.copy()
+        # Convert headers dict to regular dict to avoid any reference issues
+        if 'server_headers' in state and state['server_headers'] is not None:
+            state['server_headers'] = dict(state['server_headers'])
+        return state
+
+    def __setstate__(self, state):
+        """Custom deserialization for pickle compatibility."""
+        # Initialize Pydantic internal attributes if needed
+        if '__pydantic_fields_set__' not in state:
+            state['__pydantic_fields_set__'] = set(state.keys())
+        if '__pydantic_extra__' not in state:
+            state['__pydantic_extra__'] = None
+        if '__pydantic_private__' not in state:
+            state['__pydantic_private__'] = None
+
+        # Update object state
+        self.__dict__.update(state)
+
+    def _run(self, resource_type: str = "all") -> str:
+        """Inspect the MCP server for available resources."""
+        try:
+            # Always create a new event loop for sync context
+            # This avoids issues with existing event loops in threads
+            import concurrent.futures
+            with concurrent.futures.ThreadPoolExecutor() as executor:
+                future = executor.submit(self._run_in_new_loop, resource_type)
+                return future.result(timeout=self.timeout)
+        except Exception as e:
+            logger.error(f"Error inspecting MCP server '{self.server_name}': {e}")
+            return f"Error inspecting MCP server: {e}"
+
+    def _run_in_new_loop(self, resource_type: str) -> str:
+        """Run the async inspection in a new event loop."""
+        return asyncio.run(self._inspect_server(resource_type))
+
+    async def _inspect_server(self, resource_type: str) -> str:
+        """Perform the actual MCP server inspection."""
+        results = {}
+
+        # Determine what to inspect
+        inspect_tools = resource_type in ["all", "tools"]
+        inspect_prompts = resource_type in ["all", "prompts"]
+        inspect_resources = resource_type in ["all", "resources"]
+
+        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=self.timeout)) as session:
+
+            # List tools
+            if inspect_tools:
+                try:
+                    tools = await self._list_tools(session)
+                    results["tools"] = tools
+                except Exception as e:
+                    logger.warning(f"Failed to list tools from {self.server_name}: {e}")
+                    results["tools"] = {"error": str(e)}
+
+            # List prompts
+            if inspect_prompts:
+                try:
+                    prompts = await self._list_prompts(session)
+                    results["prompts"] = prompts
+                except Exception as e:
+                    logger.warning(f"Failed to list prompts from {self.server_name}: {e}")
+                    results["prompts"] = {"error": str(e)}
+
+            # List resources
+            if inspect_resources:
+                try:
+                    resources = await self._list_resources(session)
+                    results["resources"] = resources
+                except Exception as e:
+                    logger.warning(f"Failed to list resources from {self.server_name}: {e}")
+                    results["resources"] = {"error": str(e)}
+
+        return self._format_results(results, resource_type)
+
+    def _parse_sse(self, text: str) -> Dict[str, Any]:
+        """Parse Server-Sent Events (SSE) format response."""
+        for line in text.split('\n'):
+            line = line.strip()
+            if line.startswith('data:'):
+                json_str = line[5:].strip()
+                return json.loads(json_str)
+        raise ValueError("No data found in SSE response")
+
+    async def _list_tools(self, session: aiohttp.ClientSession) -> Dict[str, Any]:
+        """List available tools from the MCP server."""
+        request = {
+            "jsonrpc": "2.0",
+            "id": f"list_tools_{int(time.time())}",
+            "method": "tools/list",
+            "params": {}
+        }
+
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json, text/event-stream",
+            **self.server_headers
+        }
+
+        async with session.post(self.server_url, json=request, headers=headers) as response:
+            if response.status != 200:
+                raise Exception(f"HTTP {response.status}: {await response.text()}")
+
+            # Handle both JSON and SSE responses
+            content_type = response.headers.get('Content-Type', '')
+            if 'text/event-stream' in content_type:
+                # Parse SSE format
+                text = await response.text()
+                data = self._parse_sse(text)
+            else:
+                data = await response.json()
+
+            if "error" in data:
+                raise Exception(f"MCP Error: {data['error']}")
+
+            return data.get("result", {})
+
+    async def _list_prompts(self, session: aiohttp.ClientSession) -> Dict[str, Any]:
+        """List available prompts from the MCP server."""
+        request = {
+            "jsonrpc": "2.0",
+            "id": f"list_prompts_{int(time.time())}",
+            "method": "prompts/list",
+            "params": {}
+        }
+
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json, text/event-stream",
+            **self.server_headers
+        }
+
+        async with session.post(self.server_url, json=request, headers=headers) as response:
+            if response.status != 200:
+                raise Exception(f"HTTP {response.status}: {await response.text()}")
+
+            # Handle both JSON and SSE responses
+            content_type = response.headers.get('Content-Type', '')
+            if 'text/event-stream' in content_type:
+                text = await response.text()
+                data = self._parse_sse(text)
+            else:
+                data = await response.json()
+
+            if "error" in data:
+                raise Exception(f"MCP Error: {data['error']}")
+
+            return data.get("result", {})
+
+    async def _list_resources(self, session: aiohttp.ClientSession) -> Dict[str, Any]:
+        """List available resources from the MCP server."""
+        request = {
+            "jsonrpc": "2.0",
+            "id": f"list_resources_{int(time.time())}",
+            "method": "resources/list",
+            "params": {}
+        }
+
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json, text/event-stream",
+            **self.server_headers
+        }
+
+        async with session.post(self.server_url, json=request, headers=headers) as response:
+            if response.status != 200:
+                raise Exception(f"HTTP {response.status}: {await response.text()}")
+
+            # Handle both JSON and SSE responses
+            content_type = response.headers.get('Content-Type', '')
+            if 'text/event-stream' in content_type:
+                text = await response.text()
+                data = self._parse_sse(text)
+            else:
+                data = await response.json()
+
+            if "error" in data:
+                raise Exception(f"MCP Error: {data['error']}")
+
+            return data.get("result", {})
+
+    def _format_results(self, results: Dict[str, Any], resource_type: str) -> str:
+        """Format the inspection results for display."""
+        output_lines = [f"=== MCP Server Inspection: {self.server_name} ==="]
+        output_lines.append(f"Server URL: {self.server_url}")
+        output_lines.append("")
+
+        # Format tools
+        if "tools" in results:
+            if "error" in results["tools"]:
+                output_lines.append(f"❌ TOOLS: Error - {results['tools']['error']}")
+            else:
+                tools = results["tools"].get("tools", [])
+                output_lines.append(f"🔧 TOOLS ({len(tools)} available):")
+                if tools:
+                    for tool in tools:
+                        name = tool.get("name", "Unknown")
+                        desc = tool.get("description", "No description")
+                        output_lines.append(f"  • {name}: {desc}")
+                else:
+                    output_lines.append("  (No tools available)")
+            output_lines.append("")
+
+        # Format prompts
+        if "prompts" in results:
+            if "error" in results["prompts"]:
+                output_lines.append(f"❌ PROMPTS: Error - {results['prompts']['error']}")
+            else:
+                prompts = results["prompts"].get("prompts", [])
+                output_lines.append(f"💬 PROMPTS ({len(prompts)} available):")
+                if prompts:
+                    for prompt in prompts:
+                        name = prompt.get("name", "Unknown")
+                        desc = prompt.get("description", "No description")
+                        output_lines.append(f"  • {name}: {desc}")
+                else:
+                    output_lines.append("  (No prompts available)")
+            output_lines.append("")
+
+        # Format resources
+        if "resources" in results:
+            if "error" in results["resources"]:
+                output_lines.append(f"❌ RESOURCES: Error - {results['resources']['error']}")
+            else:
+                resources = results["resources"].get("resources", [])
+                output_lines.append(f"📁 RESOURCES ({len(resources)} available):")
+                if resources:
+                    for resource in resources:
+                        uri = resource.get("uri", "Unknown")
+                        name = resource.get("name", uri)
+                        desc = resource.get("description", "No description")
+                        output_lines.append(f"  • {name}: {desc}")
+                        output_lines.append(f"    URI: {uri}")
+                else:
+                    output_lines.append("  (No resources available)")
+            output_lines.append("")
+
+        return "\n".join(output_lines)
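To make the request/response handling in McpInspectTool concrete, here is a small self-contained sketch of the SSE framing it accepts; the JSON-RPC payload below is an invented example, and the extraction mirrors _parse_sse above.

    import json

    # Invented example of an SSE-framed JSON-RPC response to a "tools/list" request;
    # real MCP servers return their own tool lists.
    sse_body = (
        "event: message\n"
        'data: {"jsonrpc": "2.0", "id": "list_tools_1700000000", '
        '"result": {"tools": [{"name": "search", "description": "Full-text search"}]}}\n'
        "\n"
    )

    # Equivalent of McpInspectTool._parse_sse: take the first "data:" line and parse it.
    def parse_sse(text: str) -> dict:
        for line in text.split("\n"):
            line = line.strip()
            if line.startswith("data:"):
                return json.loads(line[5:].strip())
        raise ValueError("No data found in SSE response")

    data = parse_sse(sse_body)
    print(data["result"]["tools"][0]["name"])  # -> search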
alita_sdk/runtime/tools/mcp_remote_tool.py (new file)

@@ -0,0 +1,166 @@
+"""
+MCP Remote Tool for direct HTTP/SSE invocation.
+This tool is used for remote MCP servers accessed via HTTP/SSE.
+"""
+
+import asyncio
+import json
+import logging
+import time
+import uuid
+from concurrent.futures import ThreadPoolExecutor
+from typing import Any, Dict, Optional
+
+from .mcp_server_tool import McpServerTool
+from pydantic import Field
+from ..utils.mcp_oauth import (
+    McpAuthorizationRequired,
+    canonical_resource,
+    extract_resource_metadata_url,
+    fetch_resource_metadata_async,
+    infer_authorization_servers_from_realm,
+)
+from ..utils.mcp_sse_client import McpSseClient
+
+logger = logging.getLogger(__name__)
+
+
+class McpRemoteTool(McpServerTool):
+    """
+    Tool for invoking remote MCP server tools via HTTP/SSE.
+    Extends McpServerTool and overrides _run to use direct HTTP calls instead of client.mcp_tool_call.
+    """
+
+    # Remote MCP connection details
+    server_url: str = Field(..., description="URL of the remote MCP server")
+    server_headers: Optional[Dict[str, str]] = Field(default=None, description="HTTP headers for authentication")
+    original_tool_name: Optional[str] = Field(default=None, description="Original tool name from MCP server (before optimization)")
+    is_prompt: bool = False  # Flag to indicate if this is a prompt tool
+    prompt_name: Optional[str] = None  # Original prompt name if this is a prompt
+    session_id: Optional[str] = Field(default=None, description="MCP session ID for stateful SSE servers")
+
+    def model_post_init(self, __context: Any) -> None:
+        """Update metadata with session info after model initialization."""
+        super().model_post_init(__context)
+        self._update_metadata_with_session()
+
+    def _update_metadata_with_session(self):
+        """Update the metadata dict with current session information."""
+        if self.session_id:
+            if self.metadata is None:
+                self.metadata = {}
+            self.metadata.update({
+                'mcp_session_id': self.session_id,
+                'mcp_server_url': canonical_resource(self.server_url)
+            })
+
+    def __getstate__(self):
+        """Custom serialization for pickle compatibility."""
+        state = super().__getstate__()
+        # Ensure headers are serializable
+        if 'server_headers' in state and state['server_headers'] is not None:
+            state['server_headers'] = dict(state['server_headers'])
+        return state
+
+    def _run(self, *args, **kwargs):
+        """
+        Execute the MCP tool via direct HTTP/SSE call to the remote server.
+        Overrides the parent method to avoid using client.mcp_tool_call.
+        """
+        try:
+            # Always create a new event loop for sync context
+            with ThreadPoolExecutor() as executor:
+                future = executor.submit(self._run_in_new_loop, kwargs)
+                return future.result(timeout=self.tool_timeout_sec)
+        except McpAuthorizationRequired:
+            # Bubble up so LangChain can surface a tool error with useful metadata
+            raise
+        except Exception as e:
+            logger.error(f"Error executing remote MCP tool '{self.name}': {e}")
+            return f"Error executing tool: {e}"
+
+    def _run_in_new_loop(self, kwargs: Dict[str, Any]) -> str:
+        """Run the async tool invocation in a new event loop."""
+        return asyncio.run(self._execute_remote_tool(kwargs))
+
+    async def _execute_remote_tool(self, kwargs: Dict[str, Any]) -> str:
+        """Execute the actual remote MCP tool call using SSE client."""
+        from ...tools.utils import TOOLKIT_SPLITTER
+
+        # Check for session_id requirement
+        if not self.session_id:
+            logger.error(f"[MCP Session] Missing session_id for tool '{self.name}'")
+            raise Exception("sessionId required. Frontend must generate UUID and send with mcp_tokens.")
+
+        # Use the original tool name from discovery for MCP server invocation
+        tool_name_for_server = self.original_tool_name
+        if not tool_name_for_server:
+            tool_name_for_server = self.name.rsplit(TOOLKIT_SPLITTER, 1)[-1] if TOOLKIT_SPLITTER in self.name else self.name
+            logger.warning(f"original_tool_name not set for '{self.name}', using extracted: {tool_name_for_server}")
+
+        logger.info(f"[MCP SSE] Executing tool '{tool_name_for_server}' with session {self.session_id}")
+
+        try:
+            # Prepare headers
+            headers = {}
+            if self.server_headers:
+                headers.update(self.server_headers)
+
+            # Create SSE client
+            client = McpSseClient(
+                url=self.server_url,
+                session_id=self.session_id,
+                headers=headers,
+                timeout=self.tool_timeout_sec
+            )
+
+            # Execute tool call via SSE
+            result = await client.call_tool(tool_name_for_server, kwargs)
+
+            # Format the result
+            if isinstance(result, dict):
+                # Check for content array (common in MCP responses)
+                if "content" in result:
+                    content_items = result["content"]
+                    if isinstance(content_items, list):
+                        # Extract text from content items
+                        text_parts = []
+                        for item in content_items:
+                            if isinstance(item, dict):
+                                if item.get("type") == "text" and "text" in item:
+                                    text_parts.append(item["text"])
+                                elif "text" in item:
+                                    text_parts.append(item["text"])
+                                else:
+                                    text_parts.append(json.dumps(item))
+                            else:
+                                text_parts.append(str(item))
+                        return "\n".join(text_parts)
+
+                # Return formatted JSON if no content field
+                return json.dumps(result, indent=2)
+
+            # Return as string for other types
+            return str(result)
+
+        except Exception as e:
+            logger.error(f"[MCP SSE] Tool execution failed: {e}", exc_info=True)
+            raise
+
+    def _parse_sse(self, text: str) -> Dict[str, Any]:
+        """Parse Server-Sent Events (SSE) format response."""
+        for line in text.split('\n'):
+            line = line.strip()
+            if line.startswith('data:'):
+                json_str = line[5:].strip()
+                return json.loads(json_str)
+        raise ValueError("No data found in SSE response")
+
+    def get_session_metadata(self) -> dict:
+        """Return session metadata to be included in tool responses."""
+        if self.session_id:
+            return {
+                'mcp_session_id': self.session_id,
+                'mcp_server_url': canonical_resource(self.server_url)
+            }
+        return {}
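As a rough illustration of the result shaping in _execute_remote_tool above: MCP tool-call results commonly carry a content list, which the tool flattens to plain text. The sample payload here is invented; the flattening follows the same rules as the code.

    import json

    # Invented sample result, shaped like the dicts handled by _execute_remote_tool.
    result = {
        "content": [
            {"type": "text", "text": "First chunk"},
            {"type": "text", "text": "Second chunk"},
            {"type": "image", "data": "..."},  # non-text items are JSON-dumped
        ]
    }

    text_parts = []
    for item in result["content"]:
        if isinstance(item, dict) and "text" in item:
            text_parts.append(item["text"])
        elif isinstance(item, dict):
            text_parts.append(json.dumps(item))
        else:
            text_parts.append(str(item))

    print("\n".join(text_parts))  # text chunks joined by newlines, image item as JSON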
alita_sdk/runtime/tools/mcp_server_tool.py

@@ -3,7 +3,7 @@ from logging import getLogger
 from typing import Any, Type, Literal, Optional, Union, List
 
 from langchain_core.tools import BaseTool
-from pydantic import BaseModel, Field, create_model, EmailStr, constr
+from pydantic import BaseModel, Field, create_model, EmailStr, constr, ConfigDict
 
 from ...tools.utils import TOOLKIT_SPLITTER
 
@@ -19,6 +19,7 @@ class McpServerTool(BaseTool):
     server: str
     tool_timeout_sec: int = 60
 
+    model_config = ConfigDict(arbitrary_types_allowed=True)
 
     @staticmethod
     def create_pydantic_model_from_schema(schema: dict, model_name: str = "ArgsSchema"):
@@ -90,6 +91,7 @@ class McpServerTool(BaseTool):
         return create_model(model_name, **fields)
 
     def _run(self, *args, **kwargs):
+        # Extract the actual tool/prompt name (remove toolkit prefix)
         call_data = {
             "server": self.server,
             "tool_timeout_sec": self.tool_timeout_sec,