alita-sdk 0.3.365__py3-none-any.whl → 0.3.462__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of alita-sdk might be problematic.

Files changed (118)
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent_executor.py +144 -0
  4. alita_sdk/cli/agent_loader.py +197 -0
  5. alita_sdk/cli/agent_ui.py +166 -0
  6. alita_sdk/cli/agents.py +1069 -0
  7. alita_sdk/cli/callbacks.py +576 -0
  8. alita_sdk/cli/cli.py +159 -0
  9. alita_sdk/cli/config.py +153 -0
  10. alita_sdk/cli/formatting.py +182 -0
  11. alita_sdk/cli/mcp_loader.py +315 -0
  12. alita_sdk/cli/toolkit.py +330 -0
  13. alita_sdk/cli/toolkit_loader.py +55 -0
  14. alita_sdk/cli/tools/__init__.py +9 -0
  15. alita_sdk/cli/tools/filesystem.py +905 -0
  16. alita_sdk/configurations/bitbucket.py +95 -0
  17. alita_sdk/configurations/confluence.py +96 -1
  18. alita_sdk/configurations/gitlab.py +79 -0
  19. alita_sdk/configurations/jira.py +103 -0
  20. alita_sdk/configurations/testrail.py +88 -0
  21. alita_sdk/configurations/xray.py +93 -0
  22. alita_sdk/configurations/zephyr_enterprise.py +93 -0
  23. alita_sdk/configurations/zephyr_essential.py +75 -0
  24. alita_sdk/runtime/clients/artifact.py +1 -1
  25. alita_sdk/runtime/clients/client.py +47 -10
  26. alita_sdk/runtime/clients/mcp_discovery.py +342 -0
  27. alita_sdk/runtime/clients/mcp_manager.py +262 -0
  28. alita_sdk/runtime/clients/sandbox_client.py +373 -0
  29. alita_sdk/runtime/langchain/assistant.py +70 -41
  30. alita_sdk/runtime/langchain/constants.py +6 -1
  31. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  32. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
  33. alita_sdk/runtime/langchain/document_loaders/constants.py +73 -100
  34. alita_sdk/runtime/langchain/langraph_agent.py +164 -38
  35. alita_sdk/runtime/langchain/utils.py +43 -7
  36. alita_sdk/runtime/models/mcp_models.py +61 -0
  37. alita_sdk/runtime/toolkits/__init__.py +24 -0
  38. alita_sdk/runtime/toolkits/application.py +8 -1
  39. alita_sdk/runtime/toolkits/artifact.py +5 -6
  40. alita_sdk/runtime/toolkits/mcp.py +895 -0
  41. alita_sdk/runtime/toolkits/tools.py +140 -50
  42. alita_sdk/runtime/tools/__init__.py +7 -2
  43. alita_sdk/runtime/tools/application.py +7 -0
  44. alita_sdk/runtime/tools/function.py +94 -5
  45. alita_sdk/runtime/tools/graph.py +10 -4
  46. alita_sdk/runtime/tools/image_generation.py +104 -8
  47. alita_sdk/runtime/tools/llm.py +204 -114
  48. alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
  49. alita_sdk/runtime/tools/mcp_remote_tool.py +166 -0
  50. alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
  51. alita_sdk/runtime/tools/sandbox.py +180 -79
  52. alita_sdk/runtime/tools/vectorstore.py +22 -21
  53. alita_sdk/runtime/tools/vectorstore_base.py +79 -26
  54. alita_sdk/runtime/utils/mcp_oauth.py +164 -0
  55. alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
  56. alita_sdk/runtime/utils/streamlit.py +34 -3
  57. alita_sdk/runtime/utils/toolkit_utils.py +14 -4
  58. alita_sdk/runtime/utils/utils.py +1 -0
  59. alita_sdk/tools/__init__.py +48 -31
  60. alita_sdk/tools/ado/repos/__init__.py +1 -0
  61. alita_sdk/tools/ado/test_plan/__init__.py +1 -1
  62. alita_sdk/tools/ado/wiki/__init__.py +1 -5
  63. alita_sdk/tools/ado/work_item/__init__.py +1 -5
  64. alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
  65. alita_sdk/tools/base_indexer_toolkit.py +194 -112
  66. alita_sdk/tools/bitbucket/__init__.py +1 -0
  67. alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
  68. alita_sdk/tools/code/sonar/__init__.py +1 -1
  69. alita_sdk/tools/code_indexer_toolkit.py +15 -5
  70. alita_sdk/tools/confluence/__init__.py +2 -2
  71. alita_sdk/tools/confluence/api_wrapper.py +110 -63
  72. alita_sdk/tools/confluence/loader.py +10 -0
  73. alita_sdk/tools/elitea_base.py +22 -22
  74. alita_sdk/tools/github/__init__.py +2 -2
  75. alita_sdk/tools/gitlab/__init__.py +2 -1
  76. alita_sdk/tools/gitlab/api_wrapper.py +11 -7
  77. alita_sdk/tools/gitlab_org/__init__.py +1 -2
  78. alita_sdk/tools/google_places/__init__.py +2 -1
  79. alita_sdk/tools/jira/__init__.py +1 -0
  80. alita_sdk/tools/jira/api_wrapper.py +1 -1
  81. alita_sdk/tools/memory/__init__.py +1 -1
  82. alita_sdk/tools/non_code_indexer_toolkit.py +2 -2
  83. alita_sdk/tools/openapi/__init__.py +10 -1
  84. alita_sdk/tools/pandas/__init__.py +1 -1
  85. alita_sdk/tools/postman/__init__.py +2 -1
  86. alita_sdk/tools/postman/api_wrapper.py +18 -8
  87. alita_sdk/tools/postman/postman_analysis.py +8 -1
  88. alita_sdk/tools/pptx/__init__.py +2 -2
  89. alita_sdk/tools/qtest/__init__.py +3 -3
  90. alita_sdk/tools/qtest/api_wrapper.py +1708 -76
  91. alita_sdk/tools/rally/__init__.py +1 -2
  92. alita_sdk/tools/report_portal/__init__.py +1 -0
  93. alita_sdk/tools/salesforce/__init__.py +1 -0
  94. alita_sdk/tools/servicenow/__init__.py +2 -3
  95. alita_sdk/tools/sharepoint/__init__.py +1 -0
  96. alita_sdk/tools/sharepoint/api_wrapper.py +125 -34
  97. alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
  98. alita_sdk/tools/sharepoint/utils.py +8 -2
  99. alita_sdk/tools/slack/__init__.py +1 -0
  100. alita_sdk/tools/sql/__init__.py +2 -1
  101. alita_sdk/tools/sql/api_wrapper.py +71 -23
  102. alita_sdk/tools/testio/__init__.py +1 -0
  103. alita_sdk/tools/testrail/__init__.py +1 -3
  104. alita_sdk/tools/utils/__init__.py +17 -0
  105. alita_sdk/tools/utils/content_parser.py +35 -24
  106. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +67 -21
  107. alita_sdk/tools/xray/__init__.py +2 -1
  108. alita_sdk/tools/zephyr/__init__.py +2 -1
  109. alita_sdk/tools/zephyr_enterprise/__init__.py +1 -0
  110. alita_sdk/tools/zephyr_essential/__init__.py +1 -0
  111. alita_sdk/tools/zephyr_scale/__init__.py +1 -0
  112. alita_sdk/tools/zephyr_squad/__init__.py +1 -0
  113. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/METADATA +8 -2
  114. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/RECORD +118 -93
  115. alita_sdk-0.3.462.dist-info/entry_points.txt +2 -0
  116. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/WHEEL +0 -0
  117. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/licenses/LICENSE +0 -0
  118. {alita_sdk-0.3.365.dist-info → alita_sdk-0.3.462.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/utils/mcp_sse_client.py (new file)
@@ -0,0 +1,405 @@
+"""
+MCP SSE (Server-Sent Events) Client
+Handles persistent SSE connections for MCP servers like Atlassian
+"""
+import asyncio
+import json
+import logging
+from typing import Dict, Any, Optional, AsyncIterator
+import aiohttp
+
+logger = logging.getLogger(__name__)
+
+
+class McpSseClient:
+    """
+    Client for MCP servers using SSE (Server-Sent Events) transport.
+
+    For Atlassian-style SSE (dual-connection model):
+    - GET request opens persistent SSE stream for receiving events
+    - POST requests send commands (return 202 Accepted immediately)
+    - Responses come via the GET stream
+
+    This client handles:
+    - Opening persistent SSE connection via GET
+    - Sending JSON-RPC requests via POST
+    - Reading SSE event streams
+    - Matching responses to requests by ID
+    """
+
+    def __init__(self, url: str, session_id: str, headers: Optional[Dict[str, str]] = None, timeout: int = 300):
+        """
+        Initialize SSE client.
+
+        Args:
+            url: Base URL of the MCP SSE server
+            session_id: Client-generated UUID for session
+            headers: Additional headers (e.g., Authorization)
+            timeout: Request timeout in seconds
+        """
+        self.url = url
+        self.session_id = session_id
+        self.headers = headers or {}
+        self.timeout = timeout
+        self.url_with_session = f"{url}?sessionId={session_id}"
+        self._stream_task = None
+        self._pending_requests = {}  # request_id -> asyncio.Future
+        self._stream_session = None
+        self._stream_response = None
+        self._endpoint_ready = asyncio.Event()  # Signal when endpoint is received
+
+        logger.info(f"[MCP SSE Client] Initialized for {url} with session {session_id}")
+
+    async def _ensure_stream_connected(self):
+        """Ensure the GET stream is connected and reading events."""
+        if self._stream_task is None or self._stream_task.done():
+            logger.info(f"[MCP SSE Client] Opening persistent SSE stream...")
+            self._stream_session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=None))
+
+            headers = {
+                "Accept": "text/event-stream",
+                **self.headers
+            }
+
+            self._stream_response = await self._stream_session.get(self.url_with_session, headers=headers)
+
+            logger.info(f"[MCP SSE Client] Stream opened: status={self._stream_response.status}")
+
+            # Handle 401 Unauthorized - need OAuth
+            if self._stream_response.status == 401:
+                from ..utils.mcp_oauth import (
+                    McpAuthorizationRequired,
+                    canonical_resource,
+                    extract_resource_metadata_url,
+                    fetch_resource_metadata_async,
+                    infer_authorization_servers_from_realm,
+                    fetch_oauth_authorization_server_metadata
+                )
+
+                auth_header = self._stream_response.headers.get('WWW-Authenticate', '')
+                resource_metadata_url = extract_resource_metadata_url(auth_header, self.url)
+
+                metadata = None
+                if resource_metadata_url:
+                    metadata = await fetch_resource_metadata_async(
+                        resource_metadata_url,
+                        session=self._stream_session,
+                        timeout=30
+                    )
+
+                # Infer authorization servers if not in metadata
+                if not metadata or not metadata.get('authorization_servers'):
+                    inferred_servers = infer_authorization_servers_from_realm(auth_header, self.url)
+                    if inferred_servers:
+                        if not metadata:
+                            metadata = {}
+                        metadata['authorization_servers'] = inferred_servers
+                        logger.info(f"[MCP SSE Client] Inferred authorization servers: {inferred_servers}")
+
+                        # Fetch OAuth metadata
+                        auth_server_metadata = fetch_oauth_authorization_server_metadata(inferred_servers[0], timeout=30)
+                        if auth_server_metadata:
+                            metadata['oauth_authorization_server'] = auth_server_metadata
+                            logger.info(f"[MCP SSE Client] Fetched OAuth metadata")
+
+                raise McpAuthorizationRequired(
+                    message=f"MCP server {self.url} requires OAuth authorization",
+                    server_url=canonical_resource(self.url),
+                    resource_metadata_url=resource_metadata_url,
+                    www_authenticate=auth_header,
+                    resource_metadata=metadata,
+                    status=self._stream_response.status,
+                    tool_name=self.url,
+                )
+
+            if self._stream_response.status != 200:
+                error_text = await self._stream_response.text()
+                raise Exception(f"Failed to open SSE stream: HTTP {self._stream_response.status}: {error_text}")
+
+            # Start background task to read stream
+            self._stream_task = asyncio.create_task(self._read_stream())
+
+    async def _read_stream(self):
+        """Background task that continuously reads the SSE stream."""
+        logger.info(f"[MCP SSE Client] Starting stream reader...")
+
+        try:
+            buffer = ""
+            current_event = {}
+
+            async for chunk in self._stream_response.content.iter_chunked(1024):
+                chunk_str = chunk.decode('utf-8')
+                buffer += chunk_str
+
+                # Process complete lines
+                while '\n' in buffer:
+                    line, buffer = buffer.split('\n', 1)
+                    line_str = line.strip()
+
+                    # Empty line indicates end of event
+                    if not line_str:
+                        if current_event and 'data' in current_event:
+                            self._process_event(current_event)
+                        current_event = {}
+                        continue
+
+                    # Parse SSE fields
+                    if line_str.startswith('event:'):
+                        current_event['event'] = line_str[6:].strip()
+                    elif line_str.startswith('data:'):
+                        data_str = line_str[5:].strip()
+                        current_event['data'] = data_str
+                    elif line_str.startswith('id:'):
+                        current_event['id'] = line_str[3:].strip()
+
+        except Exception as e:
+            logger.error(f"[MCP SSE Client] Stream reader error: {e}")
+            # Fail all pending requests
+            for future in self._pending_requests.values():
+                if not future.done():
+                    future.set_exception(e)
+        finally:
+            logger.info(f"[MCP SSE Client] Stream reader stopped")
+
+    def _process_event(self, event: Dict[str, str]):
+        """Process a complete SSE event."""
+        event_type = event.get('event', 'message')
+        data_str = event.get('data', '')
+
+        # Handle 'endpoint' event - server provides the actual session URL to use
+        if event_type == 'endpoint':
+            # Extract session ID from endpoint URL
+            # Format: /v1/sse?sessionId=<uuid>
+            if 'sessionId=' in data_str:
+                new_session_id = data_str.split('sessionId=')[1].split('&')[0]
+                logger.info(f"[MCP SSE Client] Server provided session ID: {new_session_id}")
+                self.session_id = new_session_id
+                self.url_with_session = f"{self.url}?sessionId={new_session_id}"
+                self._endpoint_ready.set()  # Signal that we can now send requests
+            return
+
+        # Skip other non-message events
+        if event_type != 'message' and not data_str.startswith('{'):
+            return
+
+        if not data_str:
+            return
+
+        try:
+            data = json.loads(data_str)
+            request_id = data.get('id')
+
+            logger.debug(f"[MCP SSE Client] Received response for request {request_id}")
+
+            # Resolve pending request
+            if request_id and request_id in self._pending_requests:
+                future = self._pending_requests.pop(request_id)
+                if not future.done():
+                    future.set_result(data)
+
+        except json.JSONDecodeError as e:
+            logger.warning(f"[MCP SSE Client] Failed to parse SSE data: {e}, data: {repr(data_str)[:200]}")
+
+        except Exception as e:
+            logger.error(f"[MCP SSE Client] Stream reader error: {e}")
+            # Fail all pending requests
+            for future in self._pending_requests.values():
+                if not future.done():
+                    future.set_exception(e)
+        finally:
+            logger.info(f"[MCP SSE Client] Stream reader stopped")
+
+    async def send_request(self, method: str, params: Optional[Dict[str, Any]] = None, request_id: Optional[str] = None) -> Dict[str, Any]:
+        """
+        Send a JSON-RPC request and wait for response via SSE stream.
+
+        Uses dual-connection model:
+        1. GET stream is kept open to receive responses
+        2. POST request sends the command (returns 202 immediately)
+        3. Response comes via the GET stream
+
+        Args:
+            method: JSON-RPC method name (e.g., "tools/list", "tools/call")
+            params: Method parameters
+            request_id: Optional request ID (auto-generated if not provided)
+
+        Returns:
+            Parsed JSON-RPC response
+
+        Raises:
+            Exception: If request fails or times out
+        """
+        import time
+        if request_id is None:
+            request_id = f"{method.replace('/', '_')}_{int(time.time() * 1000)}"
+
+        request = {
+            "jsonrpc": "2.0",
+            "id": request_id,
+            "method": method,
+            "params": params or {}
+        }
+
+        logger.debug(f"[MCP SSE Client] Sending request: {method} (id={request_id})")
+
+        # Ensure stream is connected
+        await self._ensure_stream_connected()
+
+        # Wait for endpoint event (server provides the actual session ID to use)
+        await asyncio.wait_for(self._endpoint_ready.wait(), timeout=10)
+
+        # Create future for this request
+        future = asyncio.Future()
+        self._pending_requests[request_id] = future
+
+        # Send POST request
+        headers = {
+            "Content-Type": "application/json",
+            **self.headers
+        }
+
+        timeout = aiohttp.ClientTimeout(total=30)
+
+        try:
+            async with aiohttp.ClientSession(timeout=timeout) as session:
+                async with session.post(self.url_with_session, json=request, headers=headers) as response:
+                    if response.status == 404:
+                        error_text = await response.text()
+                        raise Exception(f"HTTP 404: {error_text}")
+
+                    # 202 is expected - response will come via stream
+                    if response.status not in [200, 202]:
+                        error_text = await response.text()
+                        raise Exception(f"HTTP {response.status}: {error_text}")
+
+            # Wait for response from stream (with timeout)
+            result = await asyncio.wait_for(future, timeout=self.timeout)
+
+            # Check for JSON-RPC error
+            if 'error' in result:
+                error = result['error']
+                raise Exception(f"MCP Error: {error.get('message', str(error))}")
+
+            return result
+
+        except asyncio.TimeoutError:
+            self._pending_requests.pop(request_id, None)
+            logger.error(f"[MCP SSE Client] Request timeout after {self.timeout}s")
+            raise Exception(f"SSE request timeout after {self.timeout}s")
+        except Exception as e:
+            self._pending_requests.pop(request_id, None)
+            logger.error(f"[MCP SSE Client] Request failed: {e}")
+            raise
+
+    async def close(self):
+        """Close the persistent SSE stream."""
+        logger.info(f"[MCP SSE Client] Closing connection...")
+
+        # Cancel background stream reader task
+        if self._stream_task and not self._stream_task.done():
+            self._stream_task.cancel()
+            try:
+                await self._stream_task
+            except (asyncio.CancelledError, Exception) as e:
+                logger.debug(f"[MCP SSE Client] Stream task cleanup: {e}")
+
+        # Close response stream
+        if self._stream_response and not self._stream_response.closed:
+            try:
+                self._stream_response.close()
+            except Exception as e:
+                logger.debug(f"[MCP SSE Client] Response close error: {e}")
+
+        # Close session
+        if self._stream_session and not self._stream_session.closed:
+            try:
+                await self._stream_session.close()
+                # Give aiohttp time to cleanup
+                await asyncio.sleep(0.1)
+            except Exception as e:
+                logger.debug(f"[MCP SSE Client] Session close error: {e}")
+
+        logger.info(f"[MCP SSE Client] Connection closed")
+
+    async def __aenter__(self):
+        """Async context manager entry."""
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        """Async context manager exit."""
+        await self.close()
+
+    async def initialize(self) -> Dict[str, Any]:
+        """
+        Send initialize request to establish MCP protocol session.
+
+        Returns:
+            Server capabilities and info
+        """
+        response = await self.send_request(
+            method="initialize",
+            params={
+                "protocolVersion": "2024-11-05",
+                "capabilities": {
+                    "roots": {"listChanged": True},
+                    "sampling": {}
+                },
+                "clientInfo": {
+                    "name": "Alita MCP Client",
+                    "version": "1.0.0"
+                }
+            }
+        )
+
+        logger.info(f"[MCP SSE Client] MCP session initialized")
+        return response.get('result', {})
+
+    async def list_tools(self) -> list:
+        """
+        Discover available tools from the MCP server.
+
+        Returns:
+            List of tool definitions
+        """
+        response = await self.send_request(method="tools/list")
+        result = response.get('result', {})
+        tools = result.get('tools', [])
+
+        logger.info(f"[MCP SSE Client] Discovered {len(tools)} tools")
+        return tools
+
+    async def list_prompts(self) -> list:
+        """
+        Discover available prompts from the MCP server.
+
+        Returns:
+            List of prompt definitions
+        """
+        response = await self.send_request(method="prompts/list")
+        result = response.get('result', {})
+        prompts = result.get('prompts', [])
+
+        logger.debug(f"[MCP SSE Client] Discovered {len(prompts)} prompts")
+        return prompts
+
+    async def call_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Any:
+        """
+        Execute a tool on the MCP server.
+
+        Args:
+            tool_name: Name of the tool to call
+            arguments: Tool arguments
+
+        Returns:
+            Tool execution result
+        """
+        response = await self.send_request(
+            method="tools/call",
+            params={
+                "name": tool_name,
+                "arguments": arguments
+            }
+        )
+
+        result = response.get('result', {})
+        return result
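
A minimal usage sketch for the new McpSseClient, assuming the import path implied by the file list above (alita_sdk/runtime/utils/mcp_sse_client.py); the endpoint URL, bearer token, and tool name are placeholders rather than values from this diff, and a 401 from the server would surface as McpAuthorizationRequired from the mcp_oauth module instead of returning tools.

import asyncio
import uuid

from alita_sdk.runtime.utils.mcp_sse_client import McpSseClient

async def main():
    # Placeholder endpoint and credentials; the server may replace the
    # client-generated session ID via the 'endpoint' SSE event.
    async with McpSseClient(
        url="https://example.atlassian.net/v1/sse",
        session_id=str(uuid.uuid4()),
        headers={"Authorization": "Bearer <token>"},
    ) as client:
        await client.initialize()              # JSON-RPC "initialize" sent via POST, reply arrives on the GET stream
        tools = await client.list_tools()      # "tools/list"
        print([t.get("name") for t in tools])
        result = await client.call_tool("some_tool", {"query": "hello"})  # "tools/call"; tool name is a placeholder
        print(result)

asyncio.run(main())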
alita_sdk/runtime/utils/streamlit.py
@@ -868,10 +868,24 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
 label = f"{'🔒 ' if is_secret else ''}{'*' if is_required else ''}{field_name.replace('_', ' ').title()}"

 if field_type == 'string':
-    if is_secret:
+    # Check if this is an enum field
+    if field_schema.get('enum'):
+        # Dropdown for enum values
+        options = field_schema['enum']
+        default_index = 0
+        if default_value and str(default_value) in options:
+            default_index = options.index(str(default_value))
+        toolkit_config_values[field_name] = st.selectbox(
+            label,
+            options=options,
+            index=default_index,
+            help=field_description,
+            key=f"config_{field_name}_{selected_toolkit_idx}"
+        )
+    elif is_secret:
         toolkit_config_values[field_name] = st.text_input(
             label,
-            value=str(default_value) if default_value else '',
+            value=str(default_value) if default_value else '',
             help=field_description,
             type="password",
             key=f"config_{field_name}_{selected_toolkit_idx}"
@@ -879,7 +893,7 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
     else:
         toolkit_config_values[field_name] = st.text_input(
             label,
-            value=str(default_value) if default_value else '',
+            value=str(default_value) if default_value else '',
             help=field_description,
             key=f"config_{field_name}_{selected_toolkit_idx}"
         )
@@ -971,6 +985,23 @@ def run_streamlit(st, ai_icon=None, user_icon=None):
         key=f"config_{field_name}_{selected_toolkit_idx}"
     )
     toolkit_config_values[field_name] = [line.strip() for line in array_input.split('\n') if line.strip()]
+elif field_type == 'object':
+    # Handle object/dict types (like headers)
+    obj_input = st.text_area(
+        f"{label} (JSON object)",
+        value=json.dumps(default_value) if isinstance(default_value, dict) else str(default_value) if default_value else '',
+        help=f"{field_description} - Enter as JSON object, e.g. {{\"Authorization\": \"Bearer token\"}}",
+        placeholder='{"key": "value"}',
+        key=f"config_{field_name}_{selected_toolkit_idx}"
+    )
+    try:
+        if obj_input.strip():
+            toolkit_config_values[field_name] = json.loads(obj_input)
+        else:
+            toolkit_config_values[field_name] = None
+    except json.JSONDecodeError as e:
+        st.error(f"Invalid JSON format for {field_name}: {e}")
+        toolkit_config_values[field_name] = None
 else:
     st.info("This toolkit doesn't require additional configuration.")

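
The streamlit.py hunks above add two widget types to the toolkit configuration form: string fields carrying an enum render as st.selectbox, and object fields render as a JSON text area parsed with json.loads. Below is a sketch of field schemas that would exercise both new branches; the field names and values are illustrative, not taken from this diff.

# Illustrative toolkit field schemas (hypothetical names/values) matching what the
# new branches read: 'enum' on a string field, and type == 'object' for dict fields.
toolkit_config_fields = {
    "transport": {
        "type": "string",
        "enum": ["sse", "streamable_http"],   # rendered as a dropdown (st.selectbox)
        "default": "sse",
        "description": "MCP transport",
    },
    "headers": {
        "type": "object",                     # rendered as a JSON text area, parsed with json.loads
        "default": {"Authorization": "Bearer <token>"},
        "description": "Extra HTTP headers",
    },
}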
alita_sdk/runtime/utils/toolkit_utils.py
@@ -29,13 +29,14 @@ def instantiate_toolkit_with_client(toolkit_config: Dict[str, Any],

     Raises:
         ValueError: If required configuration or client is missing
+        McpAuthorizationRequired: If MCP server requires OAuth authorization
         Exception: If toolkit instantiation fails
     """
+    toolkit_name = toolkit_config.get('toolkit_name', 'unknown')
    try:
         from ..toolkits.tools import get_tools

-        toolkit_name = toolkit_config.get('toolkit_name')
-        if not toolkit_name:
+        if not toolkit_name or toolkit_name == 'unknown':
             raise ValueError("toolkit_name is required in configuration")

         if not llm_client:
@@ -46,11 +47,14 @@ def instantiate_toolkit_with_client(toolkit_config: Dict[str, Any],
         # Log the configuration being used
         logger.info(f"Instantiating toolkit {toolkit_name} with LLM client")
         logger.debug(f"Toolkit {toolkit_name} configuration: {toolkit_config}")
-
+
+        # Use toolkit type from config, or fall back to lowercase toolkit name
+        toolkit_type = toolkit_config.get('type', toolkit_name.lower())
+
         # Create a tool configuration dict with required fields
         tool_config = {
             'id': toolkit_config.get('id', random.randint(1, 1000000)),
-            'type': toolkit_config.get('type', toolkit_name.lower()),
+            'type': toolkit_config.get('type', toolkit_type),
             'settings': settings,
             'toolkit_name': toolkit_name
         }
@@ -67,6 +71,12 @@ def instantiate_toolkit_with_client(toolkit_config: Dict[str, Any],
         return tools

     except Exception as e:
+        # Re-raise McpAuthorizationRequired without logging as error
+        from ..utils.mcp_oauth import McpAuthorizationRequired
+        if isinstance(e, McpAuthorizationRequired):
+            logger.info(f"Toolkit {toolkit_name} requires MCP OAuth authorization")
+            raise
+        # Log and re-raise other errors
         logger.error(f"Error instantiating toolkit {toolkit_name} with client: {str(e)}")
         raise

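
With the toolkit_utils.py change above, instantiate_toolkit_with_client resolves toolkit_name before the try block and re-raises McpAuthorizationRequired instead of logging it as a generic failure, so callers can start an OAuth flow. The sketch below shows that calling pattern; the wrapper function and the attribute reads on the exception are assumptions, only the exception class and its module path come from this diff.

from alita_sdk.runtime.utils.mcp_oauth import McpAuthorizationRequired

def load_toolkit_tools(load_fn, toolkit_config):
    # load_fn stands in for instantiate_toolkit_with_client (its full signature
    # is not shown in this hunk); toolkit_config is the dict it receives.
    try:
        return load_fn(toolkit_config)
    except McpAuthorizationRequired as exc:
        # Surface the OAuth requirement to the caller instead of treating it as an error.
        # Attribute names mirror the constructor kwargs seen in mcp_sse_client.py above,
        # but whether they are stored as attributes is assumed, hence getattr.
        return {
            "authorization_required": True,
            "server_url": getattr(exc, "server_url", None),
            "resource_metadata_url": getattr(exc, "resource_metadata_url", None),
        }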
alita_sdk/runtime/utils/utils.py
@@ -14,6 +14,7 @@ class IndexerKeywords(Enum):
     INDEX_META_TYPE = 'index_meta'
     INDEX_META_IN_PROGRESS = 'in_progress'
     INDEX_META_COMPLETED = 'completed'
+    INDEX_META_FAILED = 'failed'

 # This pattern matches characters that are NOT alphanumeric, underscores, or hyphens
 clean_string_pattern = re.compile(r'[^a-zA-Z0-9_.-]')
alita_sdk/tools/__init__.py
@@ -90,62 +90,79 @@ available_count = len(AVAILABLE_TOOLS)
 total_attempted = len(AVAILABLE_TOOLS) + len(FAILED_IMPORTS)
 logger.info(f"Tool imports completed: {available_count}/{total_attempted} successful")

+
 def get_tools(tools_list, alita, llm, store: Optional[BaseStore] = None, *args, **kwargs):
     tools = []
+
     for tool in tools_list:
-        # validate tool name syntax - it cannot be started with _
-        for tool_name in tool.get('settings', {}).get('selected_tools', []):
-            if isinstance(tool_name, str) and tool_name.startswith('_'):
-                raise ValueError(f"Tool name '{tool_name}' from toolkit '{tool.get('type', '')}' cannot start with '_'")
-
-        tool['settings']['alita'] = alita
-        tool['settings']['llm'] = llm
-        tool['settings']['store'] = store
+        settings = tool.get('settings')
+
+        # Skip tools without settings early
+        if not settings:
+            logger.warning(f"Tool '{tool.get('type', '')}' has no settings, skipping...")
+            continue
+
+        # Validate tool names once
+        selected_tools = settings.get('selected_tools', [])
+        invalid_tools = [name for name in selected_tools if isinstance(name, str) and name.startswith('_')]
+        if invalid_tools:
+            raise ValueError(f"Tool names {invalid_tools} from toolkit '{tool.get('type', '')}' cannot start with '_'")
+
+        # Cache tool type and add common settings
         tool_type = tool['type']
+        settings['alita'] = alita
+        settings['llm'] = llm
+        settings['store'] = store
+
+        # Set pgvector collection schema if present
+        if settings.get('pgvector_configuration'):
+            settings['pgvector_configuration']['collection_schema'] = str(tool['id'])

-        # Handle special cases for ADO tools
+        # Handle ADO special cases
         if tool_type in ['ado_boards', 'ado_wiki', 'ado_plans']:
             tools.extend(AVAILABLE_TOOLS['ado']['get_tools'](tool_type, tool))
+            continue

-        # Check if tool is available and has get_tools function
-        elif tool_type in AVAILABLE_TOOLS and 'get_tools' in AVAILABLE_TOOLS[tool_type]:
+        # Handle ADO repos aliases
+        if tool_type in ['ado_repos', 'azure_devops_repos'] and 'ado_repos' in AVAILABLE_TOOLS:
             try:
-                get_tools_func = AVAILABLE_TOOLS[tool_type]['get_tools']
-                tools.extend(get_tools_func(tool))
-
+                tools.extend(AVAILABLE_TOOLS['ado_repos']['get_tools'](tool))
             except Exception as e:
-                logger.error(f"Error getting tools for {tool_type}: {e}")
-                raise ToolException(f"Error getting tools for {tool_type}: {e}")
+                logger.error(f"Error getting ADO repos tools: {e}")
+            continue

-        # Handle ADO repos special case (it might be requested as azure_devops_repos)
-        elif tool_type in ['ado_repos', 'azure_devops_repos'] and 'ado_repos' in AVAILABLE_TOOLS:
+        # Skip MCP toolkit - it's handled by runtime/toolkits/tools.py to avoid duplicate loading
+        if tool_type == 'mcp':
+            logger.debug(f"Skipping MCP toolkit '{tool.get('toolkit_name')}' - handled by runtime toolkit system")
+            continue
+
+        # Handle standard tools
+        if tool_type in AVAILABLE_TOOLS and 'get_tools' in AVAILABLE_TOOLS[tool_type]:
             try:
-                get_tools_func = AVAILABLE_TOOLS['ado_repos']['get_tools']
-                tools.extend(get_tools_func(tool))
+                tools.extend(AVAILABLE_TOOLS[tool_type]['get_tools'](tool))
             except Exception as e:
-                logger.error(f"Error getting ADO repos tools: {e}")
+                logger.error(f"Error getting tools for {tool_type}: {e}")
+                raise ToolException(f"Error getting tools for {tool_type}: {e}")
+            continue

         # Handle custom modules
-        elif tool.get("settings", {}).get("module"):
+        if settings.get("module"):
             try:
-                settings = tool.get("settings", {})
                 mod = import_module(settings.pop("module"))
                 tkitclass = getattr(mod, settings.pop("class"))
-                #
-                get_toolkit_params = tool["settings"].copy()
+                get_toolkit_params = settings.copy()
                 get_toolkit_params["name"] = tool.get("name")
-                #
                 toolkit = tkitclass.get_toolkit(**get_toolkit_params)
                 tools.extend(toolkit.get_tools())
             except Exception as e:
                 logger.error(f"Error in getting custom toolkit: {e}")
+            continue

+        # Tool not available
+        if tool_type in FAILED_IMPORTS:
+            logger.warning(f"Tool '{tool_type}' is not available: {FAILED_IMPORTS[tool_type]}")
         else:
-            # Tool not available or not found
-            if tool_type in FAILED_IMPORTS:
-                logger.warning(f"Tool '{tool_type}' is not available: {FAILED_IMPORTS[tool_type]}")
-            else:
-                logger.warning(f"Unknown tool type: {tool_type}")
+            logger.warning(f"Unknown tool type: {tool_type}")

     return tools

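
The refactored get_tools above walks a list of toolkit dicts, injects alita/llm/store into each tool's settings, and dispatches on type (ADO special cases, the skipped mcp type, AVAILABLE_TOOLS entries, and custom module/class toolkits). Below is a sketch of the entry shape it reads; the keys follow the lookups visible in the hunk, the values are placeholders.

# Hypothetical tools_list entry; only keys that get_tools() actually reads are shown.
tools_list = [
    {
        "id": 42,                                   # str(tool['id']) becomes the pgvector collection_schema
        "type": "confluence",                       # dispatch key; 'mcp' entries are skipped in this function
        "toolkit_name": "team_docs",
        "settings": {
            "selected_tools": ["get_page"],         # names starting with '_' raise ValueError
            "pgvector_configuration": {},           # collection_schema is injected when present
            # "module" / "class" would instead route this entry through the custom-toolkit branch
        },
    },
]
# tools = get_tools(tools_list, alita=alita_client, llm=llm, store=None)   # alita_client / llm assumed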
alita_sdk/tools/ado/repos/__init__.py
@@ -63,6 +63,7 @@ class AzureDevOpsReposToolkit(BaseToolkit):
             "icon_url": "ado-repos-icon.svg",
             "categories": ["code repositories"],
             "extra_categories": ["code", "repository", "version control"],
+            "max_length": AzureDevOpsReposToolkit.toolkit_max_length
         }}}
     )

alita_sdk/tools/ado/test_plan/__init__.py
@@ -27,7 +27,6 @@ class AzureDevOpsPlansToolkit(BaseToolkit):
         AzureDevOpsPlansToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         m = create_model(
             name_alias,
-            name=(str, Field(description="Toolkit name", json_schema_extra={'toolkit_name': True, 'max_toolkit_length': AzureDevOpsPlansToolkit.toolkit_max_length})),
             ado_configuration=(AdoConfiguration, Field(description="Ado configuration", json_schema_extra={'configuration_types': ['ado']})),
             limit=(Optional[int], Field(description="ADO plans limit used for limitation of the list with results", default=5)),
             # indexer settings
@@ -40,6 +39,7 @@ class AzureDevOpsPlansToolkit(BaseToolkit):
             {
                 "label": "ADO plans",
                 "icon_url": "ado-plans.svg",
+                "max_length": AzureDevOpsPlansToolkit.toolkit_max_length,
                 "categories": ["test management"],
                 "extra_categories": ["test case management", "qa"],
                 "sections": {