alita-sdk 0.3.423__py3-none-any.whl → 0.3.435__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of alita-sdk might be problematic.
- alita_sdk/runtime/clients/client.py +6 -2
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/langchain/constants.py +1 -1
- alita_sdk/runtime/langchain/langraph_agent.py +4 -1
- alita_sdk/runtime/models/mcp_models.py +57 -0
- alita_sdk/runtime/toolkits/__init__.py +24 -0
- alita_sdk/runtime/toolkits/mcp.py +787 -0
- alita_sdk/runtime/toolkits/tools.py +19 -2
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +79 -10
- alita_sdk/runtime/utils/streamlit.py +34 -3
- alita_sdk/runtime/utils/toolkit_utils.py +5 -2
- alita_sdk/tools/__init__.py +5 -0
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/gitlab/api_wrapper.py +5 -0
- alita_sdk/tools/qtest/api_wrapper.py +240 -39
- {alita_sdk-0.3.423.dist-info → alita_sdk-0.3.435.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.423.dist-info → alita_sdk-0.3.435.dist-info}/RECORD +22 -17
- {alita_sdk-0.3.423.dist-info → alita_sdk-0.3.435.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.423.dist-info → alita_sdk-0.3.435.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.423.dist-info → alita_sdk-0.3.435.dist-info}/top_level.txt +0 -0

alita_sdk/runtime/toolkits/tools.py
CHANGED

@@ -12,6 +12,7 @@ from .datasource import DatasourcesToolkit
 from .prompt import PromptToolkit
 from .subgraph import SubgraphToolkit
 from .vectorstore import VectorStoreToolkit
+from .mcp import McpToolkit
 from ..tools.mcp_server_tool import McpServerTool
 from ..tools.sandbox import SandboxToolkit
 from ..tools.image_generation import ImageGenerationToolkit
@@ -29,7 +30,8 @@ def get_toolkits():
         MemoryToolkit.toolkit_config_schema(),
         VectorStoreToolkit.toolkit_config_schema(),
         SandboxToolkit.toolkit_config_schema(),
-        ImageGenerationToolkit.toolkit_config_schema()
+        ImageGenerationToolkit.toolkit_config_schema(),
+        McpToolkit.toolkit_config_schema()
     ]

     return core_toolkits + community_toolkits() + alita_toolkits()
@@ -106,6 +108,11 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
                     llm=llm,
                     toolkit_name=tool.get('toolkit_name', ''),
                     **tool['settings']).get_tools())
+            elif tool['type'] == 'mcp':
+                tools.extend(McpToolkit.get_toolkit(
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    client=alita_client,
+                    **tool['settings']).get_tools())
         except Exception as e:
             logger.error(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}", exc_info=True)
             if debug_mode:
@@ -121,7 +128,8 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
     tools += community_tools(tools_list, alita_client, llm)
     # Add alita tools
     tools += alita_tools(tools_list, alita_client, llm, memory_store)
-    # Add MCP tools
+    # Add MCP tools registered via alita-mcp CLI (static registry)
+    # Note: Tools with type='mcp' are already handled in main loop above
     tools += _mcp_tools(tools_list, alita_client)

     # Sanitize tool names to meet OpenAI's function naming requirements
@@ -176,6 +184,10 @@ def _sanitize_tool_names(tools: list) -> list:


 def _mcp_tools(tools_list, alita):
+    """
+    Handle MCP tools registered via alita-mcp CLI (static registry).
+    Skips tools with type='mcp' as those are handled by dynamic discovery.
+    """
     try:
         all_available_toolkits = alita.get_mcp_toolkits()
         toolkit_lookup = {tk["name"]: tk for tk in all_available_toolkits}
@@ -183,6 +195,11 @@ def _mcp_tools(tools_list, alita):
         #
         for selected_toolkit in tools_list:
             server_toolkit_name = selected_toolkit['type']
+
+            # Skip tools with type='mcp' - they're handled by dynamic discovery
+            if server_toolkit_name == 'mcp':
+                continue
+
             toolkit_conf = toolkit_lookup.get(server_toolkit_name)
             #
             if not toolkit_conf:
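The new `elif tool['type'] == 'mcp'` branch routes MCP toolkit entries through `McpToolkit.get_toolkit(...)` with the Alita client attached. Below is a minimal, self-contained sketch of that routing; the stub toolkit class and the `settings` keys are illustrative assumptions, not the SDK's actual implementation.

```python
# Hypothetical sketch of the dispatch added to get_tools(); FakeMcpToolkit and
# the settings keys are stand-ins, only the call shape mirrors the diff.
class FakeMcpToolkit:
    def __init__(self, **kwargs):
        self.kwargs = kwargs

    @classmethod
    def get_toolkit(cls, toolkit_name="", client=None, **settings):
        # McpToolkit.get_toolkit(...) receives the toolkit name, the Alita
        # client and the raw settings dict, exactly as in the new branch.
        return cls(toolkit_name=toolkit_name, client=client, **settings)

    def get_tools(self):
        return [f"tool_from_{self.kwargs['toolkit_name']}"]


tool = {
    "type": "mcp",                                   # triggers the new branch
    "toolkit_name": "my_mcp_server",                 # hypothetical name
    "settings": {"url": "https://example.com/mcp"},  # assumed settings key
}

tools = []
if tool["type"] == "mcp":
    tools.extend(FakeMcpToolkit.get_toolkit(
        toolkit_name=tool.get("toolkit_name", ""),
        client=None,  # the SDK passes alita_client here
        **tool["settings"],
    ).get_tools())

print(tools)  # ['tool_from_my_mcp_server']
```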

alita_sdk/runtime/tools/mcp_inspect_tool.py
ADDED

@@ -0,0 +1,284 @@
+"""
+MCP Server Inspection Tool.
+Allows inspecting available tools, prompts, and resources on an MCP server.
+"""
+
+import asyncio
+import json
+import logging
+import time
+from typing import Any, Type, Dict, List, Optional
+
+from langchain_core.tools import BaseTool
+from pydantic import BaseModel, Field, ConfigDict
+import aiohttp
+
+logger = logging.getLogger(__name__)
+
+
+class McpInspectInput(BaseModel):
+    """Input schema for MCP server inspection tool."""
+
+    resource_type: str = Field(
+        default="all",
+        description="What to inspect: 'tools', 'prompts', 'resources', or 'all'"
+    )
+
+
+class McpInspectTool(BaseTool):
+    """Tool for inspecting available tools, prompts, and resources on an MCP server."""
+
+    name: str = "mcp_inspect"
+    description: str = "List available tools, prompts, and resources from the MCP server"
+    args_schema: Type[BaseModel] = McpInspectInput
+    return_type: str = "str"
+
+    # MCP server connection details
+    server_name: str = Field(..., description="Name of the MCP server")
+    server_url: str = Field(..., description="URL of the MCP server")
+    server_headers: Optional[Dict[str, str]] = Field(default=None, description="HTTP headers for authentication")
+    timeout: int = Field(default=30, description="Request timeout in seconds")
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    def __getstate__(self):
+        """Custom serialization for pickle compatibility."""
+        state = self.__dict__.copy()
+        # Convert headers dict to regular dict to avoid any reference issues
+        if 'server_headers' in state and state['server_headers'] is not None:
+            state['server_headers'] = dict(state['server_headers'])
+        return state
+
+    def __setstate__(self, state):
+        """Custom deserialization for pickle compatibility."""
+        # Initialize Pydantic internal attributes if needed
+        if '__pydantic_fields_set__' not in state:
+            state['__pydantic_fields_set__'] = set(state.keys())
+        if '__pydantic_extra__' not in state:
+            state['__pydantic_extra__'] = None
+        if '__pydantic_private__' not in state:
+            state['__pydantic_private__'] = None
+
+        # Update object state
+        self.__dict__.update(state)
+
+    def _run(self, resource_type: str = "all") -> str:
+        """Inspect the MCP server for available resources."""
+        try:
+            # Always create a new event loop for sync context
+            # This avoids issues with existing event loops in threads
+            import concurrent.futures
+            with concurrent.futures.ThreadPoolExecutor() as executor:
+                future = executor.submit(self._run_in_new_loop, resource_type)
+                return future.result(timeout=self.timeout)
+        except Exception as e:
+            logger.error(f"Error inspecting MCP server '{self.server_name}': {e}")
+            return f"Error inspecting MCP server: {e}"
+
+    def _run_in_new_loop(self, resource_type: str) -> str:
+        """Run the async inspection in a new event loop."""
+        return asyncio.run(self._inspect_server(resource_type))
+
+    async def _inspect_server(self, resource_type: str) -> str:
+        """Perform the actual MCP server inspection."""
+        results = {}
+
+        # Determine what to inspect
+        inspect_tools = resource_type in ["all", "tools"]
+        inspect_prompts = resource_type in ["all", "prompts"]
+        inspect_resources = resource_type in ["all", "resources"]
+
+        async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=self.timeout)) as session:
+
+            # List tools
+            if inspect_tools:
+                try:
+                    tools = await self._list_tools(session)
+                    results["tools"] = tools
+                except Exception as e:
+                    logger.warning(f"Failed to list tools from {self.server_name}: {e}")
+                    results["tools"] = {"error": str(e)}
+
+            # List prompts
+            if inspect_prompts:
+                try:
+                    prompts = await self._list_prompts(session)
+                    results["prompts"] = prompts
+                except Exception as e:
+                    logger.warning(f"Failed to list prompts from {self.server_name}: {e}")
+                    results["prompts"] = {"error": str(e)}
+
+            # List resources
+            if inspect_resources:
+                try:
+                    resources = await self._list_resources(session)
+                    results["resources"] = resources
+                except Exception as e:
+                    logger.warning(f"Failed to list resources from {self.server_name}: {e}")
+                    results["resources"] = {"error": str(e)}
+
+        return self._format_results(results, resource_type)
+
+    def _parse_sse(self, text: str) -> Dict[str, Any]:
+        """Parse Server-Sent Events (SSE) format response."""
+        for line in text.split('\n'):
+            line = line.strip()
+            if line.startswith('data:'):
+                json_str = line[5:].strip()
+                return json.loads(json_str)
+        raise ValueError("No data found in SSE response")
+
+    async def _list_tools(self, session: aiohttp.ClientSession) -> Dict[str, Any]:
+        """List available tools from the MCP server."""
+        request = {
+            "jsonrpc": "2.0",
+            "id": f"list_tools_{int(time.time())}",
+            "method": "tools/list",
+            "params": {}
+        }
+
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json, text/event-stream",
+            **self.server_headers
+        }
+
+        async with session.post(self.server_url, json=request, headers=headers) as response:
+            if response.status != 200:
+                raise Exception(f"HTTP {response.status}: {await response.text()}")
+
+            # Handle both JSON and SSE responses
+            content_type = response.headers.get('Content-Type', '')
+            if 'text/event-stream' in content_type:
+                # Parse SSE format
+                text = await response.text()
+                data = self._parse_sse(text)
+            else:
+                data = await response.json()
+
+            if "error" in data:
+                raise Exception(f"MCP Error: {data['error']}")
+
+            return data.get("result", {})
+
+    async def _list_prompts(self, session: aiohttp.ClientSession) -> Dict[str, Any]:
+        """List available prompts from the MCP server."""
+        request = {
+            "jsonrpc": "2.0",
+            "id": f"list_prompts_{int(time.time())}",
+            "method": "prompts/list",
+            "params": {}
+        }
+
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json, text/event-stream",
+            **self.server_headers
+        }
+
+        async with session.post(self.server_url, json=request, headers=headers) as response:
+            if response.status != 200:
+                raise Exception(f"HTTP {response.status}: {await response.text()}")
+
+            # Handle both JSON and SSE responses
+            content_type = response.headers.get('Content-Type', '')
+            if 'text/event-stream' in content_type:
+                text = await response.text()
+                data = self._parse_sse(text)
+            else:
+                data = await response.json()
+
+            if "error" in data:
+                raise Exception(f"MCP Error: {data['error']}")
+
+            return data.get("result", {})
+
+    async def _list_resources(self, session: aiohttp.ClientSession) -> Dict[str, Any]:
+        """List available resources from the MCP server."""
+        request = {
+            "jsonrpc": "2.0",
+            "id": f"list_resources_{int(time.time())}",
+            "method": "resources/list",
+            "params": {}
+        }
+
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json, text/event-stream",
+            **self.server_headers
+        }
+
+        async with session.post(self.server_url, json=request, headers=headers) as response:
+            if response.status != 200:
+                raise Exception(f"HTTP {response.status}: {await response.text()}")
+
+            # Handle both JSON and SSE responses
+            content_type = response.headers.get('Content-Type', '')
+            if 'text/event-stream' in content_type:
+                text = await response.text()
+                data = self._parse_sse(text)
+            else:
+                data = await response.json()
+
+            if "error" in data:
+                raise Exception(f"MCP Error: {data['error']}")
+
+            return data.get("result", {})
+
+    def _format_results(self, results: Dict[str, Any], resource_type: str) -> str:
+        """Format the inspection results for display."""
+        output_lines = [f"=== MCP Server Inspection: {self.server_name} ==="]
+        output_lines.append(f"Server URL: {self.server_url}")
+        output_lines.append("")
+
+        # Format tools
+        if "tools" in results:
+            if "error" in results["tools"]:
+                output_lines.append(f"❌ TOOLS: Error - {results['tools']['error']}")
+            else:
+                tools = results["tools"].get("tools", [])
+                output_lines.append(f"🔧 TOOLS ({len(tools)} available):")
+                if tools:
+                    for tool in tools:
+                        name = tool.get("name", "Unknown")
+                        desc = tool.get("description", "No description")
+                        output_lines.append(f"  • {name}: {desc}")
+                else:
+                    output_lines.append("  (No tools available)")
+            output_lines.append("")
+
+        # Format prompts
+        if "prompts" in results:
+            if "error" in results["prompts"]:
+                output_lines.append(f"❌ PROMPTS: Error - {results['prompts']['error']}")
+            else:
+                prompts = results["prompts"].get("prompts", [])
+                output_lines.append(f"💬 PROMPTS ({len(prompts)} available):")
+                if prompts:
+                    for prompt in prompts:
+                        name = prompt.get("name", "Unknown")
+                        desc = prompt.get("description", "No description")
+                        output_lines.append(f"  • {name}: {desc}")
+                else:
+                    output_lines.append("  (No prompts available)")
+            output_lines.append("")
+
+        # Format resources
+        if "resources" in results:
+            if "error" in results["resources"]:
+                output_lines.append(f"❌ RESOURCES: Error - {results['resources']['error']}")
+            else:
+                resources = results["resources"].get("resources", [])
+                output_lines.append(f"📁 RESOURCES ({len(resources)} available):")
+                if resources:
+                    for resource in resources:
+                        uri = resource.get("uri", "Unknown")
+                        name = resource.get("name", uri)
+                        desc = resource.get("description", "No description")
+                        output_lines.append(f"  • {name}: {desc}")
+                        output_lines.append(f"    URI: {uri}")
+                else:
+                    output_lines.append("  (No resources available)")
+            output_lines.append("")
+
+        return "\n".join(output_lines)
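McpInspectTool speaks plain JSON-RPC 2.0 over HTTP and accepts either a JSON body or a single Server-Sent-Events `data:` frame. The standalone sketch below reproduces the `_parse_sse` logic on a made-up payload so the SSE fallback can be exercised without a live MCP server.

```python
import json

def parse_sse(text: str) -> dict:
    """Return the JSON body of the first 'data:' line, as McpInspectTool._parse_sse does."""
    for line in text.split("\n"):
        line = line.strip()
        if line.startswith("data:"):
            return json.loads(line[5:].strip())
    raise ValueError("No data found in SSE response")

# Illustrative tools/list reply streamed as one SSE event.
sample = 'event: message\ndata: {"jsonrpc": "2.0", "id": "list_tools_1", "result": {"tools": []}}\n\n'
print(parse_sse(sample)["result"])  # {'tools': []}
```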

alita_sdk/runtime/tools/mcp_server_tool.py
CHANGED

@@ -3,7 +3,7 @@ from logging import getLogger
 from typing import Any, Type, Literal, Optional, Union, List

 from langchain_core.tools import BaseTool
-from pydantic import BaseModel, Field, create_model, EmailStr, constr
+from pydantic import BaseModel, Field, create_model, EmailStr, constr, ConfigDict

 from ...tools.utils import TOOLKIT_SPLITTER

@@ -15,9 +15,61 @@ class McpServerTool(BaseTool):
     description: str
     args_schema: Optional[Type[BaseModel]] = None
     return_type: str = "str"
-    client: Any
+    client: Any = Field(default=None, exclude=True)  # Exclude from serialization
     server: str
     tool_timeout_sec: int = 60
+    is_prompt: bool = False  # Flag to indicate if this is a prompt tool
+    prompt_name: Optional[str] = None  # Original prompt name if this is a prompt
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    def __getstate__(self):
+        """Custom serialization to exclude non-serializable objects."""
+        state = self.__dict__.copy()
+        # Remove the client since it contains threading objects that can't be pickled
+        state['client'] = None
+        # Store args_schema as a schema dict instead of the dynamic class
+        if hasattr(self, 'args_schema') and self.args_schema is not None:
+            # Convert the Pydantic model back to schema dict for pickling
+            try:
+                state['_args_schema_dict'] = self.args_schema.model_json_schema()
+                state['args_schema'] = None
+            except Exception as e:
+                logger.warning(f"Failed to serialize args_schema: {e}")
+                # If conversion fails, just remove it
+                state['args_schema'] = None
+                state['_args_schema_dict'] = {}
+        return state
+
+    def __setstate__(self, state):
+        """Custom deserialization to handle missing objects."""
+        # Restore the args_schema from the stored schema dict
+        args_schema_dict = state.pop('_args_schema_dict', {})
+
+        # Initialize required Pydantic internal attributes
+        if '__pydantic_fields_set__' not in state:
+            state['__pydantic_fields_set__'] = set(state.keys())
+        if '__pydantic_extra__' not in state:
+            state['__pydantic_extra__'] = None
+        if '__pydantic_private__' not in state:
+            state['__pydantic_private__'] = None
+
+        # Directly update the object's __dict__ to bypass Pydantic validation
+        self.__dict__.update(state)
+
+        # Recreate the args_schema from the stored dict if available
+        if args_schema_dict:
+            try:
+                recreated_schema = self.create_pydantic_model_from_schema(args_schema_dict)
+                self.__dict__['args_schema'] = recreated_schema
+            except Exception as e:
+                logger.warning(f"Failed to recreate args_schema: {e}")
+                self.__dict__['args_schema'] = None
+        else:
+            self.__dict__['args_schema'] = None
+
+        # Note: client will be None after unpickling
+        # The toolkit should reinitialize the client when needed


     @staticmethod
@@ -90,14 +142,31 @@ class McpServerTool(BaseTool):
         return create_model(model_name, **fields)

     def _run(self, *args, **kwargs):
-
-
-
-
-
-
-        "
+        # Extract the actual tool/prompt name (remove toolkit prefix)
+        actual_name = self.name.rsplit(TOOLKIT_SPLITTER)[1] if TOOLKIT_SPLITTER in self.name else self.name
+
+        if self.is_prompt:
+            # For prompts, use prompts/get endpoint
+            call_data = {
+                "server": self.server,
+                "tool_timeout_sec": self.tool_timeout_sec,
+                "tool_call_id": str(uuid.uuid4()),
+                "method": "prompts/get",
+                "params": {
+                    "name": self.prompt_name or actual_name.replace("prompt_", ""),
+                    "arguments": kwargs.get("arguments", kwargs)
+                }
+            }
+        else:
+            # For regular tools, use tools/call endpoint
+            call_data = {
+                "server": self.server,
+                "tool_timeout_sec": self.tool_timeout_sec,
+                "tool_call_id": str(uuid.uuid4()),
+                "params": {
+                    "name": actual_name,
+                    "arguments": kwargs
+                }
             }
-        }

         return self.client.mcp_tool_call(call_data)
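`_run` now builds one of two payloads for `client.mcp_tool_call`: `prompts/get` when the tool wraps a prompt, or the default tool call otherwise. The sketch below shows both shapes with made-up names; the `TOOLKIT_SPLITTER` value is an assumption (the real constant lives in `alita_sdk.tools.utils`).

```python
import uuid

TOOLKIT_SPLITTER = "___"  # assumption; the SDK imports the real value

name = "my_mcp___prompt_summarize"  # hypothetical "<toolkit><splitter><tool>" name
actual_name = name.rsplit(TOOLKIT_SPLITTER)[1] if TOOLKIT_SPLITTER in name else name

# Prompt-backed tool -> prompts/get
prompt_call = {
    "server": "my-mcp-server",
    "tool_timeout_sec": 60,
    "tool_call_id": str(uuid.uuid4()),
    "method": "prompts/get",
    "params": {"name": actual_name.replace("prompt_", ""), "arguments": {"topic": "MCP"}},
}

# Regular tool -> default tools/call path
tool_call = {
    "server": "my-mcp-server",
    "tool_timeout_sec": 60,
    "tool_call_id": str(uuid.uuid4()),
    "params": {"name": actual_name, "arguments": {"query": "MCP"}},
}

print(prompt_call["params"]["name"], "|", tool_call["params"]["name"])
# summarize | prompt_summarize
```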

alita_sdk/runtime/utils/streamlit.py
CHANGED

@@ -868,10 +868,24 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
                 label = f"{'🔒 ' if is_secret else ''}{'*' if is_required else ''}{field_name.replace('_', ' ').title()}"

                 if field_type == 'string':
-                    if is_secret:
+                    # Check if this is an enum field
+                    if field_schema.get('enum'):
+                        # Dropdown for enum values
+                        options = field_schema['enum']
+                        default_index = 0
+                        if default_value and str(default_value) in options:
+                            default_index = options.index(str(default_value))
+                        toolkit_config_values[field_name] = st.selectbox(
+                            label,
+                            options=options,
+                            index=default_index,
+                            help=field_description,
+                            key=f"config_{field_name}_{selected_toolkit_idx}"
+                        )
+                    elif is_secret:
                         toolkit_config_values[field_name] = st.text_input(
                             label,
-                            value=str(default_value) if default_value else '',
+                            value=str(default_value) if default_value else '',
                             help=field_description,
                             type="password",
                             key=f"config_{field_name}_{selected_toolkit_idx}"
@@ -879,7 +893,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
                     else:
                         toolkit_config_values[field_name] = st.text_input(
                             label,
-                            value=str(default_value) if default_value else '',
+                            value=str(default_value) if default_value else '',
                             help=field_description,
                             key=f"config_{field_name}_{selected_toolkit_idx}"
                         )
@@ -971,6 +985,23 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
                        key=f"config_{field_name}_{selected_toolkit_idx}"
                    )
                    toolkit_config_values[field_name] = [line.strip() for line in array_input.split('\n') if line.strip()]
+                elif field_type == 'object':
+                    # Handle object/dict types (like headers)
+                    obj_input = st.text_area(
+                        f"{label} (JSON object)",
+                        value=json.dumps(default_value) if isinstance(default_value, dict) else str(default_value) if default_value else '',
+                        help=f"{field_description} - Enter as JSON object, e.g. {{\"Authorization\": \"Bearer token\"}}",
+                        placeholder='{"key": "value"}',
+                        key=f"config_{field_name}_{selected_toolkit_idx}"
+                    )
+                    try:
+                        if obj_input.strip():
+                            toolkit_config_values[field_name] = json.loads(obj_input)
+                        else:
+                            toolkit_config_values[field_name] = None
+                    except json.JSONDecodeError as e:
+                        st.error(f"Invalid JSON format for {field_name}: {e}")
+                        toolkit_config_values[field_name] = None
                 else:
                     st.info("This toolkit doesn't require additional configuration.")

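The new `object` branch lets fields such as HTTP headers be entered as a JSON object in the Streamlit form. A Streamlit-free sketch of the same parsing, with illustrative sample values:

```python
import json

def parse_object_field(obj_input: str):
    """Mirror of the try/except the diff adds around object-typed fields."""
    try:
        return json.loads(obj_input) if obj_input.strip() else None
    except json.JSONDecodeError:
        # run_streamlit() reports this via st.error(...) and stores None
        return None

print(parse_object_field('{"Authorization": "Bearer token"}'))  # {'Authorization': 'Bearer token'}
print(parse_object_field(""))                                   # None
print(parse_object_field("not json"))                           # None
```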

alita_sdk/runtime/utils/toolkit_utils.py
CHANGED

@@ -46,11 +46,14 @@ def instantiate_toolkit_with_client(toolkit_config: Dict[str, Any],
     # Log the configuration being used
     logger.info(f"Instantiating toolkit {toolkit_name} with LLM client")
     logger.debug(f"Toolkit {toolkit_name} configuration: {toolkit_config}")
-
+
+    # Use toolkit type from config, or fall back to lowercase toolkit name
+    toolkit_type = toolkit_config.get('type', toolkit_name.lower())
+
     # Create a tool configuration dict with required fields
     tool_config = {
         'id': toolkit_config.get('id', random.randint(1, 1000000)),
-        'type': toolkit_config.get('type',
+        'type': toolkit_config.get('type', toolkit_type),
         'settings': settings,
         'toolkit_name': toolkit_name
     }
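The effect of the new fallback is simply that a config without an explicit `type` resolves to the lowercased toolkit name:

```python
toolkit_name = "GitHub"            # hypothetical toolkit name
toolkit_config = {"settings": {}}  # no explicit 'type'
toolkit_type = toolkit_config.get("type", toolkit_name.lower())
print(toolkit_type)  # "github"
```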
alita_sdk/tools/__init__.py
CHANGED

@@ -131,6 +131,11 @@ def get_tools(tools_list, alita, llm, store: Optional[BaseStore] = None, *args,
             logger.error(f"Error getting ADO repos tools: {e}")
             continue

+        # Skip MCP toolkit - it's handled by runtime/toolkits/tools.py to avoid duplicate loading
+        if tool_type == 'mcp':
+            logger.debug(f"Skipping MCP toolkit '{tool.get('toolkit_name')}' - handled by runtime toolkit system")
+            continue
+
         # Handle standard tools
         if tool_type in AVAILABLE_TOOLS and 'get_tools' in AVAILABLE_TOOLS[tool_type]:
             try:

alita_sdk/tools/chunkers/sematic/proposal_chunker.py
CHANGED

@@ -6,7 +6,7 @@ from langchain_core.prompts import ChatPromptTemplate
 from langchain.text_splitter import TokenTextSplitter

 from typing import Optional, List
-from
+from pydantic import BaseModel
 from ..utils import tiktoken_length

 logger = getLogger(__name__)

alita_sdk/tools/gitlab/api_wrapper.py
CHANGED

@@ -115,6 +115,11 @@ class GitLabAPIWrapper(CodeIndexerToolkit):
         """Remove trailing slash from URL if present."""
         return url.rstrip('/') if url else url

+    @model_validator(mode='before')
+    @classmethod
+    def validate_toolkit_before(cls, values: Dict) -> Dict:
+        return super().validate_toolkit(values)
+
     @model_validator(mode='after')
     def validate_toolkit(self):
         try:
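The GitLab wrapper now runs the parent class's `validate_toolkit` hook as a `mode='before'` validator while keeping its own `mode='after'` pass. A minimal Pydantic v2 sketch of that before/after pairing, using invented classes rather than the real `CodeIndexerToolkit`/`GitLabAPIWrapper`:

```python
from pydantic import BaseModel, model_validator

class BaseWrapper(BaseModel):
    url: str = ""

    @classmethod
    def validate_toolkit(cls, values: dict) -> dict:
        # Parent-level normalisation, e.g. trimming a trailing slash.
        if isinstance(values, dict) and "url" in values:
            values["url"] = values["url"].rstrip("/")
        return values

class GitLabLikeWrapper(BaseWrapper):
    @model_validator(mode="before")
    @classmethod
    def validate_toolkit_before(cls, values: dict) -> dict:
        # Delegate to the parent hook before field validation runs.
        return super().validate_toolkit(values)

    @model_validator(mode="after")
    def validate_toolkit(self):
        # Post-construction checks would go here.
        return self

print(GitLabLikeWrapper(url="https://gitlab.example.com/").url)
# https://gitlab.example.com
```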