alita-sdk 0.3.435__py3-none-any.whl → 0.3.457__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of alita-sdk might be problematic.
- alita_sdk/runtime/clients/client.py +39 -7
- alita_sdk/runtime/langchain/assistant.py +10 -2
- alita_sdk/runtime/langchain/langraph_agent.py +57 -15
- alita_sdk/runtime/langchain/utils.py +19 -3
- alita_sdk/runtime/models/mcp_models.py +4 -0
- alita_sdk/runtime/toolkits/artifact.py +5 -6
- alita_sdk/runtime/toolkits/mcp.py +258 -150
- alita_sdk/runtime/toolkits/tools.py +44 -2
- alita_sdk/runtime/tools/function.py +2 -1
- alita_sdk/runtime/tools/mcp_remote_tool.py +166 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +9 -76
- alita_sdk/runtime/tools/vectorstore_base.py +17 -2
- alita_sdk/runtime/utils/mcp_oauth.py +164 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
- alita_sdk/runtime/utils/toolkit_utils.py +9 -2
- alita_sdk/tools/ado/repos/__init__.py +1 -0
- alita_sdk/tools/ado/test_plan/__init__.py +1 -1
- alita_sdk/tools/ado/wiki/__init__.py +1 -5
- alita_sdk/tools/ado/work_item/__init__.py +1 -5
- alita_sdk/tools/base_indexer_toolkit.py +10 -6
- alita_sdk/tools/bitbucket/__init__.py +1 -0
- alita_sdk/tools/code/sonar/__init__.py +1 -1
- alita_sdk/tools/confluence/__init__.py +2 -2
- alita_sdk/tools/github/__init__.py +2 -2
- alita_sdk/tools/gitlab/__init__.py +2 -1
- alita_sdk/tools/gitlab_org/__init__.py +1 -2
- alita_sdk/tools/google_places/__init__.py +2 -1
- alita_sdk/tools/jira/__init__.py +1 -0
- alita_sdk/tools/memory/__init__.py +1 -1
- alita_sdk/tools/pandas/__init__.py +1 -1
- alita_sdk/tools/postman/__init__.py +2 -1
- alita_sdk/tools/pptx/__init__.py +2 -2
- alita_sdk/tools/qtest/__init__.py +3 -3
- alita_sdk/tools/qtest/api_wrapper.py +374 -29
- alita_sdk/tools/rally/__init__.py +1 -2
- alita_sdk/tools/report_portal/__init__.py +1 -0
- alita_sdk/tools/salesforce/__init__.py +1 -0
- alita_sdk/tools/servicenow/__init__.py +2 -3
- alita_sdk/tools/sharepoint/__init__.py +1 -0
- alita_sdk/tools/slack/__init__.py +1 -0
- alita_sdk/tools/sql/__init__.py +2 -1
- alita_sdk/tools/testio/__init__.py +1 -0
- alita_sdk/tools/testrail/__init__.py +1 -3
- alita_sdk/tools/xray/__init__.py +2 -1
- alita_sdk/tools/zephyr/__init__.py +2 -1
- alita_sdk/tools/zephyr_enterprise/__init__.py +1 -0
- alita_sdk/tools/zephyr_essential/__init__.py +1 -0
- alita_sdk/tools/zephyr_scale/__init__.py +1 -0
- alita_sdk/tools/zephyr_squad/__init__.py +1 -0
- {alita_sdk-0.3.435.dist-info → alita_sdk-0.3.457.dist-info}/METADATA +2 -1
- {alita_sdk-0.3.435.dist-info → alita_sdk-0.3.457.dist-info}/RECORD +54 -51
- {alita_sdk-0.3.435.dist-info → alita_sdk-0.3.457.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.435.dist-info → alita_sdk-0.3.457.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.435.dist-info → alita_sdk-0.3.457.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/utils/mcp_sse_client.py (new file)
@@ -0,0 +1,405 @@
+"""
+MCP SSE (Server-Sent Events) Client
+Handles persistent SSE connections for MCP servers like Atlassian
+"""
+import asyncio
+import json
+import logging
+from typing import Dict, Any, Optional, AsyncIterator
+import aiohttp
+
+logger = logging.getLogger(__name__)
+
+
+class McpSseClient:
+    """
+    Client for MCP servers using SSE (Server-Sent Events) transport.
+
+    For Atlassian-style SSE (dual-connection model):
+    - GET request opens persistent SSE stream for receiving events
+    - POST requests send commands (return 202 Accepted immediately)
+    - Responses come via the GET stream
+
+    This client handles:
+    - Opening persistent SSE connection via GET
+    - Sending JSON-RPC requests via POST
+    - Reading SSE event streams
+    - Matching responses to requests by ID
+    """
+
+    def __init__(self, url: str, session_id: str, headers: Optional[Dict[str, str]] = None, timeout: int = 300):
+        """
+        Initialize SSE client.
+
+        Args:
+            url: Base URL of the MCP SSE server
+            session_id: Client-generated UUID for session
+            headers: Additional headers (e.g., Authorization)
+            timeout: Request timeout in seconds
+        """
+        self.url = url
+        self.session_id = session_id
+        self.headers = headers or {}
+        self.timeout = timeout
+        self.url_with_session = f"{url}?sessionId={session_id}"
+        self._stream_task = None
+        self._pending_requests = {}  # request_id -> asyncio.Future
+        self._stream_session = None
+        self._stream_response = None
+        self._endpoint_ready = asyncio.Event()  # Signal when endpoint is received
+
+        logger.info(f"[MCP SSE Client] Initialized for {url} with session {session_id}")
+
+    async def _ensure_stream_connected(self):
+        """Ensure the GET stream is connected and reading events."""
+        if self._stream_task is None or self._stream_task.done():
+            logger.info(f"[MCP SSE Client] Opening persistent SSE stream...")
+            self._stream_session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=None))
+
+            headers = {
+                "Accept": "text/event-stream",
+                **self.headers
+            }
+
+            self._stream_response = await self._stream_session.get(self.url_with_session, headers=headers)
+
+            logger.info(f"[MCP SSE Client] Stream opened: status={self._stream_response.status}")
+
+            # Handle 401 Unauthorized - need OAuth
+            if self._stream_response.status == 401:
+                from ..utils.mcp_oauth import (
+                    McpAuthorizationRequired,
+                    canonical_resource,
+                    extract_resource_metadata_url,
+                    fetch_resource_metadata_async,
+                    infer_authorization_servers_from_realm,
+                    fetch_oauth_authorization_server_metadata
+                )
+
+                auth_header = self._stream_response.headers.get('WWW-Authenticate', '')
+                resource_metadata_url = extract_resource_metadata_url(auth_header, self.url)
+
+                metadata = None
+                if resource_metadata_url:
+                    metadata = await fetch_resource_metadata_async(
+                        resource_metadata_url,
+                        session=self._stream_session,
+                        timeout=30
+                    )
+
+                # Infer authorization servers if not in metadata
+                if not metadata or not metadata.get('authorization_servers'):
+                    inferred_servers = infer_authorization_servers_from_realm(auth_header, self.url)
+                    if inferred_servers:
+                        if not metadata:
+                            metadata = {}
+                        metadata['authorization_servers'] = inferred_servers
+                        logger.info(f"[MCP SSE Client] Inferred authorization servers: {inferred_servers}")
+
+                        # Fetch OAuth metadata
+                        auth_server_metadata = fetch_oauth_authorization_server_metadata(inferred_servers[0], timeout=30)
+                        if auth_server_metadata:
+                            metadata['oauth_authorization_server'] = auth_server_metadata
+                            logger.info(f"[MCP SSE Client] Fetched OAuth metadata")
+
+                raise McpAuthorizationRequired(
+                    message=f"MCP server {self.url} requires OAuth authorization",
+                    server_url=canonical_resource(self.url),
+                    resource_metadata_url=resource_metadata_url,
+                    www_authenticate=auth_header,
+                    resource_metadata=metadata,
+                    status=self._stream_response.status,
+                    tool_name=self.url,
+                )
+
+            if self._stream_response.status != 200:
+                error_text = await self._stream_response.text()
+                raise Exception(f"Failed to open SSE stream: HTTP {self._stream_response.status}: {error_text}")
+
+            # Start background task to read stream
+            self._stream_task = asyncio.create_task(self._read_stream())
+
+    async def _read_stream(self):
+        """Background task that continuously reads the SSE stream."""
+        logger.info(f"[MCP SSE Client] Starting stream reader...")
+
+        try:
+            buffer = ""
+            current_event = {}
+
+            async for chunk in self._stream_response.content.iter_chunked(1024):
+                chunk_str = chunk.decode('utf-8')
+                buffer += chunk_str
+
+                # Process complete lines
+                while '\n' in buffer:
+                    line, buffer = buffer.split('\n', 1)
+                    line_str = line.strip()
+
+                    # Empty line indicates end of event
+                    if not line_str:
+                        if current_event and 'data' in current_event:
+                            self._process_event(current_event)
+                        current_event = {}
+                        continue
+
+                    # Parse SSE fields
+                    if line_str.startswith('event:'):
+                        current_event['event'] = line_str[6:].strip()
+                    elif line_str.startswith('data:'):
+                        data_str = line_str[5:].strip()
+                        current_event['data'] = data_str
+                    elif line_str.startswith('id:'):
+                        current_event['id'] = line_str[3:].strip()
+
+        except Exception as e:
+            logger.error(f"[MCP SSE Client] Stream reader error: {e}")
+            # Fail all pending requests
+            for future in self._pending_requests.values():
+                if not future.done():
+                    future.set_exception(e)
+        finally:
+            logger.info(f"[MCP SSE Client] Stream reader stopped")
+
+    def _process_event(self, event: Dict[str, str]):
+        """Process a complete SSE event."""
+        event_type = event.get('event', 'message')
+        data_str = event.get('data', '')
+
+        # Handle 'endpoint' event - server provides the actual session URL to use
+        if event_type == 'endpoint':
+            # Extract session ID from endpoint URL
+            # Format: /v1/sse?sessionId=<uuid>
+            if 'sessionId=' in data_str:
+                new_session_id = data_str.split('sessionId=')[1].split('&')[0]
+                logger.info(f"[MCP SSE Client] Server provided session ID: {new_session_id}")
+                self.session_id = new_session_id
+                self.url_with_session = f"{self.url}?sessionId={new_session_id}"
+                self._endpoint_ready.set()  # Signal that we can now send requests
+            return
+
+        # Skip other non-message events
+        if event_type != 'message' and not data_str.startswith('{'):
+            return
+
+        if not data_str:
+            return
+
+        try:
+            data = json.loads(data_str)
+            request_id = data.get('id')
+
+            logger.debug(f"[MCP SSE Client] Received response for request {request_id}")
+
+            # Resolve pending request
+            if request_id and request_id in self._pending_requests:
+                future = self._pending_requests.pop(request_id)
+                if not future.done():
+                    future.set_result(data)
+
+        except json.JSONDecodeError as e:
+            logger.warning(f"[MCP SSE Client] Failed to parse SSE data: {e}, data: {repr(data_str)[:200]}")
+
+        except Exception as e:
+            logger.error(f"[MCP SSE Client] Stream reader error: {e}")
+            # Fail all pending requests
+            for future in self._pending_requests.values():
+                if not future.done():
+                    future.set_exception(e)
+        finally:
+            logger.info(f"[MCP SSE Client] Stream reader stopped")
+
+    async def send_request(self, method: str, params: Optional[Dict[str, Any]] = None, request_id: Optional[str] = None) -> Dict[str, Any]:
+        """
+        Send a JSON-RPC request and wait for response via SSE stream.
+
+        Uses dual-connection model:
+        1. GET stream is kept open to receive responses
+        2. POST request sends the command (returns 202 immediately)
+        3. Response comes via the GET stream
+
+        Args:
+            method: JSON-RPC method name (e.g., "tools/list", "tools/call")
+            params: Method parameters
+            request_id: Optional request ID (auto-generated if not provided)
+
+        Returns:
+            Parsed JSON-RPC response
+
+        Raises:
+            Exception: If request fails or times out
+        """
+        import time
+        if request_id is None:
+            request_id = f"{method.replace('/', '_')}_{int(time.time() * 1000)}"
+
+        request = {
+            "jsonrpc": "2.0",
+            "id": request_id,
+            "method": method,
+            "params": params or {}
+        }
+
+        logger.debug(f"[MCP SSE Client] Sending request: {method} (id={request_id})")
+
+        # Ensure stream is connected
+        await self._ensure_stream_connected()
+
+        # Wait for endpoint event (server provides the actual session ID to use)
+        await asyncio.wait_for(self._endpoint_ready.wait(), timeout=10)
+
+        # Create future for this request
+        future = asyncio.Future()
+        self._pending_requests[request_id] = future
+
+        # Send POST request
+        headers = {
+            "Content-Type": "application/json",
+            **self.headers
+        }
+
+        timeout = aiohttp.ClientTimeout(total=30)
+
+        try:
+            async with aiohttp.ClientSession(timeout=timeout) as session:
+                async with session.post(self.url_with_session, json=request, headers=headers) as response:
+                    if response.status == 404:
+                        error_text = await response.text()
+                        raise Exception(f"HTTP 404: {error_text}")
+
+                    # 202 is expected - response will come via stream
+                    if response.status not in [200, 202]:
+                        error_text = await response.text()
+                        raise Exception(f"HTTP {response.status}: {error_text}")
+
+            # Wait for response from stream (with timeout)
+            result = await asyncio.wait_for(future, timeout=self.timeout)
+
+            # Check for JSON-RPC error
+            if 'error' in result:
+                error = result['error']
+                raise Exception(f"MCP Error: {error.get('message', str(error))}")
+
+            return result
+
+        except asyncio.TimeoutError:
+            self._pending_requests.pop(request_id, None)
+            logger.error(f"[MCP SSE Client] Request timeout after {self.timeout}s")
+            raise Exception(f"SSE request timeout after {self.timeout}s")
+        except Exception as e:
+            self._pending_requests.pop(request_id, None)
+            logger.error(f"[MCP SSE Client] Request failed: {e}")
+            raise
+
+    async def close(self):
+        """Close the persistent SSE stream."""
+        logger.info(f"[MCP SSE Client] Closing connection...")
+
+        # Cancel background stream reader task
+        if self._stream_task and not self._stream_task.done():
+            self._stream_task.cancel()
+            try:
+                await self._stream_task
+            except (asyncio.CancelledError, Exception) as e:
+                logger.debug(f"[MCP SSE Client] Stream task cleanup: {e}")
+
+        # Close response stream
+        if self._stream_response and not self._stream_response.closed:
+            try:
+                self._stream_response.close()
+            except Exception as e:
+                logger.debug(f"[MCP SSE Client] Response close error: {e}")
+
+        # Close session
+        if self._stream_session and not self._stream_session.closed:
+            try:
+                await self._stream_session.close()
+                # Give aiohttp time to cleanup
+                await asyncio.sleep(0.1)
+            except Exception as e:
+                logger.debug(f"[MCP SSE Client] Session close error: {e}")
+
+        logger.info(f"[MCP SSE Client] Connection closed")
+
+    async def __aenter__(self):
+        """Async context manager entry."""
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        """Async context manager exit."""
+        await self.close()
+
+    async def initialize(self) -> Dict[str, Any]:
+        """
+        Send initialize request to establish MCP protocol session.
+
+        Returns:
+            Server capabilities and info
+        """
+        response = await self.send_request(
+            method="initialize",
+            params={
+                "protocolVersion": "2024-11-05",
+                "capabilities": {
+                    "roots": {"listChanged": True},
+                    "sampling": {}
+                },
+                "clientInfo": {
+                    "name": "Alita MCP Client",
+                    "version": "1.0.0"
+                }
+            }
+        )
+
+        logger.info(f"[MCP SSE Client] MCP session initialized")
+        return response.get('result', {})
+
+    async def list_tools(self) -> list:
+        """
+        Discover available tools from the MCP server.
+
+        Returns:
+            List of tool definitions
+        """
+        response = await self.send_request(method="tools/list")
+        result = response.get('result', {})
+        tools = result.get('tools', [])
+
+        logger.info(f"[MCP SSE Client] Discovered {len(tools)} tools")
+        return tools
+
+    async def list_prompts(self) -> list:
+        """
+        Discover available prompts from the MCP server.
+
+        Returns:
+            List of prompt definitions
+        """
+        response = await self.send_request(method="prompts/list")
+        result = response.get('result', {})
+        prompts = result.get('prompts', [])
+
+        logger.debug(f"[MCP SSE Client] Discovered {len(prompts)} prompts")
+        return prompts
+
+    async def call_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Any:
+        """
+        Execute a tool on the MCP server.
+
+        Args:
+            tool_name: Name of the tool to call
+            arguments: Tool arguments
+
+        Returns:
+            Tool execution result
+        """
+        response = await self.send_request(
+            method="tools/call",
+            params={
+                "name": tool_name,
+                "arguments": arguments
+            }
+        )
+
+        result = response.get('result', {})
+        return result
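
For orientation, here is a minimal usage sketch of the new client (not part of the diff). It assumes the module is importable as alita_sdk.runtime.utils.mcp_sse_client per the file list above, that the target endpoint speaks the Atlassian-style dual-connection SSE protocol described in the class docstring, and that the URL and bearer token below are placeholders:

import asyncio
import uuid

from alita_sdk.runtime.utils.mcp_sse_client import McpSseClient

async def main():
    # The session id is client-generated; the server may replace it via the
    # 'endpoint' SSE event, which McpSseClient handles internally.
    async with McpSseClient(
        url="https://example.com/v1/sse",              # placeholder endpoint
        session_id=str(uuid.uuid4()),
        headers={"Authorization": "Bearer <token>"},   # placeholder credential
        timeout=300,
    ) as client:
        await client.initialize()             # MCP protocol handshake
        tools = await client.list_tools()     # JSON-RPC "tools/list"
        if tools:
            print(await client.call_tool(tools[0]["name"], {}))

asyncio.run(main())
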
alita_sdk/runtime/utils/toolkit_utils.py
@@ -29,13 +29,14 @@ def instantiate_toolkit_with_client(toolkit_config: Dict[str, Any],
 
     Raises:
         ValueError: If required configuration or client is missing
+        McpAuthorizationRequired: If MCP server requires OAuth authorization
         Exception: If toolkit instantiation fails
     """
+    toolkit_name = toolkit_config.get('toolkit_name', 'unknown')
     try:
         from ..toolkits.tools import get_tools
 
-        toolkit_name
-        if not toolkit_name:
+        if not toolkit_name or toolkit_name == 'unknown':
             raise ValueError("toolkit_name is required in configuration")
 
         if not llm_client:
@@ -70,6 +71,12 @@ def instantiate_toolkit_with_client(toolkit_config: Dict[str, Any],
         return tools
 
     except Exception as e:
+        # Re-raise McpAuthorizationRequired without logging as error
+        from ..utils.mcp_oauth import McpAuthorizationRequired
+        if isinstance(e, McpAuthorizationRequired):
+            logger.info(f"Toolkit {toolkit_name} requires MCP OAuth authorization")
+            raise
+        # Log and re-raise other errors
         logger.error(f"Error instantiating toolkit {toolkit_name} with client: {str(e)}")
         raise
 
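
The toolkit_utils.py change above re-raises McpAuthorizationRequired instead of logging it as an error. A hedged sketch of how a caller might treat that exception as a signal rather than a failure follows; the import paths are taken from the file list, and the call and its arguments are illustrative only, since the full signature of instantiate_toolkit_with_client is not shown in this hunk:

import logging

from alita_sdk.runtime.utils.mcp_oauth import McpAuthorizationRequired
from alita_sdk.runtime.utils.toolkit_utils import instantiate_toolkit_with_client

logger = logging.getLogger(__name__)

def safe_instantiate(toolkit_config, llm_client):
    """Return the toolkit's tools, or an empty list if OAuth is still pending."""
    try:
        # Illustrative call; the real function may take additional arguments.
        return instantiate_toolkit_with_client(toolkit_config, llm_client)
    except McpAuthorizationRequired as exc:
        # Not an error: the MCP server wants an OAuth handshake first.
        logger.info("MCP OAuth authorization required: %s", exc)
        return []
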
alita_sdk/tools/ado/test_plan/__init__.py
@@ -27,7 +27,6 @@ class AzureDevOpsPlansToolkit(BaseToolkit):
         AzureDevOpsPlansToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         m = create_model(
             name_alias,
-            name=(str, Field(description="Toolkit name", json_schema_extra={'toolkit_name': True, 'max_toolkit_length': AzureDevOpsPlansToolkit.toolkit_max_length})),
             ado_configuration=(AdoConfiguration, Field(description="Ado configuration", json_schema_extra={'configuration_types': ['ado']})),
             limit=(Optional[int], Field(description="ADO plans limit used for limitation of the list with results", default=5)),
             # indexer settings
@@ -40,6 +39,7 @@ class AzureDevOpsPlansToolkit(BaseToolkit):
                 {
                     "label": "ADO plans",
                     "icon_url": "ado-plans.svg",
+                    "max_length": AzureDevOpsPlansToolkit.toolkit_max_length,
                     "categories": ["test management"],
                     "extra_categories": ["test case management", "qa"],
                     "sections": {
alita_sdk/tools/ado/wiki/__init__.py
@@ -24,11 +24,6 @@ class AzureDevOpsWikiToolkit(BaseToolkit):
         AzureDevOpsWikiToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         m = create_model(
             name_alias,
-            name=(str, Field(description="Toolkit name",
-                             json_schema_extra={
-                                 'toolkit_name': True,
-                                 'max_toolkit_length': AzureDevOpsWikiToolkit.toolkit_max_length})
-                  ),
             ado_configuration=(AdoConfiguration, Field(description="Ado configuration", json_schema_extra={'configuration_types': ['ado']})),
             # indexer settings
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None,
@@ -42,6 +37,7 @@ class AzureDevOpsWikiToolkit(BaseToolkit):
                 'metadata': {
                     "label": "ADO wiki",
                     "icon_url": "ado-wiki-icon.svg",
+                    "max_length": AzureDevOpsWikiToolkit.toolkit_max_length,
                     "categories": ["documentation"],
                     "extra_categories": ["knowledge base", "documentation management", "wiki"],
                     "sections": {
alita_sdk/tools/ado/work_item/__init__.py
@@ -23,11 +23,6 @@ class AzureDevOpsWorkItemsToolkit(BaseToolkit):
         AzureDevOpsWorkItemsToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         m = create_model(
             name,
-            name=(str, Field(description="Toolkit name",
-                             json_schema_extra={
-                                 'toolkit_name': True,
-                                 'max_toolkit_length': AzureDevOpsWorkItemsToolkit.toolkit_max_length})
-                  ),
             ado_configuration=(AdoConfiguration, Field(description="Ado Work Item configuration", json_schema_extra={'configuration_types': ['ado']})),
             limit=(Optional[int], Field(description="ADO plans limit used for limitation of the list with results", default=5)),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
@@ -42,6 +37,7 @@ class AzureDevOpsWorkItemsToolkit(BaseToolkit):
                 'metadata': {
                     "label": "ADO boards",
                     "icon_url": "ado-boards-icon.svg",
+                    "max_length": AzureDevOpsWorkItemsToolkit.toolkit_max_length,
                     "categories": ["project management"],
                     "extra_categories": ["work item management", "issue tracking", "agile boards"],
                     "sections": {
alita_sdk/tools/base_indexer_toolkit.py
@@ -15,6 +15,8 @@ from ..runtime.utils.utils import IndexerKeywords
 
 logger = logging.getLogger(__name__)
 
+DEFAULT_CUT_OFF = 0.2
+
 # Base Vector Store Schema Models
 BaseIndexParams = create_model(
     "BaseIndexParams",
@@ -37,7 +39,7 @@ BaseSearchParams = create_model(
         default={},
         examples=["{\"key\": \"value\"}", "{\"status\": \"active\"}"]
     )),
-    cut_off=(Optional[float], Field(description="Cut-off score for search results", default=
+    cut_off=(Optional[float], Field(description="Cut-off score for search results", default=DEFAULT_CUT_OFF, ge=0, le=1)),
     search_top=(Optional[int], Field(description="Number of top results to return", default=10)),
     full_text_search=(Optional[Dict[str, Any]], Field(
         description="Full text search parameters. Can be a dictionary with search options.",
@@ -67,7 +69,7 @@ BaseStepbackSearchParams = create_model(
         default={},
         examples=["{\"key\": \"value\"}", "{\"status\": \"active\"}"]
     )),
-    cut_off=(Optional[float], Field(description="Cut-off score for search results", default=
+    cut_off=(Optional[float], Field(description="Cut-off score for search results", default=DEFAULT_CUT_OFF, ge=0, le=1)),
     search_top=(Optional[int], Field(description="Number of top results to return", default=10)),
     full_text_search=(Optional[Dict[str, Any]], Field(
         description="Full text search parameters. Can be a dictionary with search options.",
@@ -380,7 +382,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
     def search_index(self,
                      query: str,
                      index_name: str = "",
-                     filter: dict | str = {}, cut_off: float =
+                     filter: dict | str = {}, cut_off: float = DEFAULT_CUT_OFF,
                      search_top: int = 10, reranker: dict = {},
                      full_text_search: Optional[Dict[str, Any]] = None,
                      reranking_config: Optional[Dict[str, Dict[str, Any]]] = None,
@@ -411,7 +413,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
                      query: str,
                      messages: List[Dict[str, Any]] = [],
                      index_name: str = "",
-                     filter: dict | str = {}, cut_off: float =
+                     filter: dict | str = {}, cut_off: float = DEFAULT_CUT_OFF,
                      search_top: int = 10, reranker: dict = {},
                      full_text_search: Optional[Dict[str, Any]] = None,
                      reranking_config: Optional[Dict[str, Dict[str, Any]]] = None,
@@ -436,7 +438,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
                      query: str,
                      messages: List[Dict[str, Any]] = [],
                      index_name: str = "",
-                     filter: dict | str = {}, cut_off: float =
+                     filter: dict | str = {}, cut_off: float = DEFAULT_CUT_OFF,
                      search_top: int = 10, reranker: dict = {},
                      full_text_search: Optional[Dict[str, Any]] = None,
                      reranking_config: Optional[Dict[str, Dict[str, Any]]] = None,
@@ -470,6 +472,7 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
             "collection": index_name,
             "type": IndexerKeywords.INDEX_META_TYPE.value,
             "indexed": 0,
+            "updated": 0,
             "state": IndexerKeywords.INDEX_META_IN_PROGRESS.value,
             "index_configuration": index_configuration,
             "created_on": created_on,
@@ -487,7 +490,8 @@ class BaseIndexerToolkit(VectorStoreWrapperBase):
         #
         if index_meta_raw:
             metadata = copy.deepcopy(index_meta_raw.get("metadata", {}))
-            metadata["indexed"] =
+            metadata["indexed"] = self.get_indexed_count(index_name)
+            metadata["updated"] = result
             metadata["state"] = state
             metadata["updated_on"] = time.time()
             #
alita_sdk/tools/bitbucket/__init__.py
@@ -61,6 +61,7 @@ class AlitaBitbucketToolkit(BaseToolkit):
             'metadata':
                 {
                     "label": "Bitbucket", "icon_url": "bitbucket-icon.svg",
+                    "max_length": AlitaBitbucketToolkit.toolkit_max_length,
                     "categories": ["code repositories"],
                     "extra_categories": ["bitbucket", "git", "repository", "code", "version control"],
                 }
alita_sdk/tools/code/sonar/__init__.py
@@ -29,7 +29,7 @@ class SonarToolkit(BaseToolkit):
         SonarToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            sonar_project_name=(str, Field(description="Project name of the desired repository"
+            sonar_project_name=(str, Field(description="Project name of the desired repository")),
             sonar_configuration=(SonarConfiguration, Field(description="Sonar Configuration", json_schema_extra={'configuration_types': ['sonar']})),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             __config__=ConfigDict(json_schema_extra=
alita_sdk/tools/confluence/__init__.py
@@ -67,8 +67,7 @@ class ConfluenceToolkit(BaseToolkit):
 
         model = create_model(
             name,
-            space=(str, Field(description="Space",
-                              'max_toolkit_length': ConfluenceToolkit.toolkit_max_length})),
+            space=(str, Field(description="Space")),
             cloud=(bool, Field(description="Hosting Option", json_schema_extra={'configuration': True})),
             limit=(int, Field(description="Pages limit per request", default=5)),
             labels=(Optional[str], Field(
@@ -95,6 +94,7 @@ class ConfluenceToolkit(BaseToolkit):
             'metadata': {
                 "label": "Confluence",
                 "icon_url": None,
+                "max_length": ConfluenceToolkit.toolkit_max_length,
                 "categories": ["documentation"],
                 "extra_categories": ["confluence", "wiki", "knowledge base", "documentation", "atlassian"]
             }
alita_sdk/tools/github/__init__.py
@@ -53,6 +53,7 @@ class AlitaGitHubToolkit(BaseToolkit):
             'metadata': {
                 "label": "GitHub",
                 "icon_url": None,
+                "max_length": AlitaGitHubToolkit.toolkit_max_length,
                 "categories": ["code repositories"],
                 "extra_categories": ["github", "git", "repository", "code", "version control"],
             },
@@ -62,8 +63,7 @@ class AlitaGitHubToolkit(BaseToolkit):
                                                   json_schema_extra={'configuration_types': ['github']})),
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(description="PgVector configuration", default=None,
                                                                            json_schema_extra={'configuration_types': ['pgvector']})),
-            repository=(str, Field(description="Github repository",
-                                   'max_toolkit_length': AlitaGitHubToolkit.toolkit_max_length})),
+            repository=(str, Field(description="Github repository")),
             active_branch=(Optional[str], Field(description="Active branch", default="main")),
             base_branch=(Optional[str], Field(description="Github Base branch", default="main")),
             # embedder settings
alita_sdk/tools/gitlab/__init__.py
@@ -43,7 +43,7 @@ class AlitaGitlabToolkit(BaseToolkit):
         AlitaGitlabToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            repository=(str, Field(description="GitLab repository"
+            repository=(str, Field(description="GitLab repository")),
             gitlab_configuration=(GitlabConfiguration, Field(description="GitLab configuration", json_schema_extra={'configuration_types': ['gitlab']})),
             branch=(str, Field(description="Main branch", default="main")),
             # indexer settings
@@ -57,6 +57,7 @@ class AlitaGitlabToolkit(BaseToolkit):
             'metadata': {
                 "label": "GitLab",
                 "icon_url": None,
+                "max_length": AlitaGitlabToolkit.toolkit_max_length,
                 "categories": ["code repositories"],
                 "extra_categories": ["gitlab", "git", "repository", "code", "version control"],
             }
alita_sdk/tools/gitlab_org/__init__.py
@@ -30,8 +30,6 @@ class AlitaGitlabSpaceToolkit(BaseToolkit):
         AlitaGitlabSpaceToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            name=(str, Field(description="Toolkit name", json_schema_extra={'toolkit_name': True,
-                                                                            'max_toolkit_length': AlitaGitlabSpaceToolkit.toolkit_max_length})),
             gitlab_configuration=(GitlabConfiguration, Field(description="GitLab configuration",
                                                              json_schema_extra={
                                                                  'configuration_types': ['gitlab']})),
@@ -46,6 +44,7 @@ class AlitaGitlabSpaceToolkit(BaseToolkit):
             'metadata': {
                 "label": "GitLab Org",
                 "icon_url": None,
+                "max_length": AlitaGitlabSpaceToolkit.toolkit_max_length,
                 "categories": ["code repositories"],
                 "extra_categories": ["gitlab", "git", "repository", "code", "version control"],
             }