alita-sdk 0.3.497__py3-none-any.whl → 0.3.515__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of alita-sdk might be problematic.

Files changed (108)
  1. alita_sdk/cli/inventory.py +12 -195
  2. alita_sdk/community/inventory/__init__.py +12 -0
  3. alita_sdk/community/inventory/toolkit.py +9 -5
  4. alita_sdk/community/inventory/toolkit_utils.py +176 -0
  5. alita_sdk/configurations/ado.py +144 -0
  6. alita_sdk/configurations/confluence.py +76 -42
  7. alita_sdk/configurations/figma.py +76 -0
  8. alita_sdk/configurations/gitlab.py +2 -0
  9. alita_sdk/configurations/qtest.py +72 -1
  10. alita_sdk/configurations/report_portal.py +96 -0
  11. alita_sdk/configurations/sharepoint.py +148 -0
  12. alita_sdk/configurations/testio.py +83 -0
  13. alita_sdk/runtime/clients/artifact.py +2 -2
  14. alita_sdk/runtime/clients/client.py +24 -19
  15. alita_sdk/runtime/clients/sandbox_client.py +14 -0
  16. alita_sdk/runtime/langchain/assistant.py +48 -2
  17. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
  18. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +2 -1
  19. alita_sdk/runtime/langchain/document_loaders/constants.py +2 -1
  20. alita_sdk/runtime/langchain/langraph_agent.py +8 -9
  21. alita_sdk/runtime/langchain/utils.py +6 -1
  22. alita_sdk/runtime/toolkits/artifact.py +14 -5
  23. alita_sdk/runtime/toolkits/datasource.py +13 -6
  24. alita_sdk/runtime/toolkits/mcp.py +26 -157
  25. alita_sdk/runtime/toolkits/planning.py +10 -5
  26. alita_sdk/runtime/toolkits/tools.py +23 -7
  27. alita_sdk/runtime/toolkits/vectorstore.py +11 -5
  28. alita_sdk/runtime/tools/artifact.py +139 -6
  29. alita_sdk/runtime/tools/llm.py +20 -10
  30. alita_sdk/runtime/tools/mcp_remote_tool.py +2 -3
  31. alita_sdk/runtime/tools/mcp_server_tool.py +2 -4
  32. alita_sdk/runtime/utils/AlitaCallback.py +30 -1
  33. alita_sdk/runtime/utils/mcp_client.py +33 -6
  34. alita_sdk/runtime/utils/mcp_oauth.py +125 -8
  35. alita_sdk/runtime/utils/mcp_sse_client.py +35 -6
  36. alita_sdk/runtime/utils/utils.py +2 -0
  37. alita_sdk/tools/__init__.py +15 -0
  38. alita_sdk/tools/ado/repos/__init__.py +10 -12
  39. alita_sdk/tools/ado/test_plan/__init__.py +23 -8
  40. alita_sdk/tools/ado/wiki/__init__.py +24 -8
  41. alita_sdk/tools/ado/wiki/ado_wrapper.py +21 -7
  42. alita_sdk/tools/ado/work_item/__init__.py +24 -8
  43. alita_sdk/tools/advanced_jira_mining/__init__.py +10 -8
  44. alita_sdk/tools/aws/delta_lake/__init__.py +12 -9
  45. alita_sdk/tools/aws/delta_lake/tool.py +5 -1
  46. alita_sdk/tools/azure_ai/search/__init__.py +9 -7
  47. alita_sdk/tools/base/tool.py +5 -1
  48. alita_sdk/tools/base_indexer_toolkit.py +25 -0
  49. alita_sdk/tools/bitbucket/__init__.py +14 -10
  50. alita_sdk/tools/bitbucket/api_wrapper.py +50 -2
  51. alita_sdk/tools/browser/__init__.py +5 -4
  52. alita_sdk/tools/carrier/__init__.py +5 -6
  53. alita_sdk/tools/cloud/aws/__init__.py +9 -7
  54. alita_sdk/tools/cloud/azure/__init__.py +9 -7
  55. alita_sdk/tools/cloud/gcp/__init__.py +9 -7
  56. alita_sdk/tools/cloud/k8s/__init__.py +9 -7
  57. alita_sdk/tools/code/linter/__init__.py +9 -8
  58. alita_sdk/tools/code/sonar/__init__.py +9 -7
  59. alita_sdk/tools/confluence/__init__.py +15 -10
  60. alita_sdk/tools/custom_open_api/__init__.py +11 -5
  61. alita_sdk/tools/elastic/__init__.py +10 -8
  62. alita_sdk/tools/elitea_base.py +387 -9
  63. alita_sdk/tools/figma/__init__.py +8 -7
  64. alita_sdk/tools/github/__init__.py +12 -14
  65. alita_sdk/tools/github/github_client.py +68 -2
  66. alita_sdk/tools/github/tool.py +5 -1
  67. alita_sdk/tools/gitlab/__init__.py +14 -11
  68. alita_sdk/tools/gitlab/api_wrapper.py +81 -1
  69. alita_sdk/tools/gitlab_org/__init__.py +9 -8
  70. alita_sdk/tools/google/bigquery/__init__.py +12 -12
  71. alita_sdk/tools/google/bigquery/tool.py +5 -1
  72. alita_sdk/tools/google_places/__init__.py +9 -8
  73. alita_sdk/tools/jira/__init__.py +15 -10
  74. alita_sdk/tools/keycloak/__init__.py +10 -8
  75. alita_sdk/tools/localgit/__init__.py +8 -3
  76. alita_sdk/tools/localgit/local_git.py +62 -54
  77. alita_sdk/tools/localgit/tool.py +5 -1
  78. alita_sdk/tools/memory/__init__.py +11 -3
  79. alita_sdk/tools/ocr/__init__.py +10 -8
  80. alita_sdk/tools/openapi/__init__.py +6 -2
  81. alita_sdk/tools/pandas/__init__.py +9 -7
  82. alita_sdk/tools/postman/__init__.py +10 -11
  83. alita_sdk/tools/pptx/__init__.py +9 -9
  84. alita_sdk/tools/qtest/__init__.py +9 -8
  85. alita_sdk/tools/rally/__init__.py +9 -8
  86. alita_sdk/tools/report_portal/__init__.py +11 -9
  87. alita_sdk/tools/salesforce/__init__.py +9 -9
  88. alita_sdk/tools/servicenow/__init__.py +10 -8
  89. alita_sdk/tools/sharepoint/__init__.py +9 -8
  90. alita_sdk/tools/slack/__init__.py +8 -7
  91. alita_sdk/tools/sql/__init__.py +9 -8
  92. alita_sdk/tools/testio/__init__.py +9 -8
  93. alita_sdk/tools/testrail/__init__.py +10 -8
  94. alita_sdk/tools/utils/__init__.py +9 -4
  95. alita_sdk/tools/utils/text_operations.py +254 -0
  96. alita_sdk/tools/xray/__init__.py +10 -8
  97. alita_sdk/tools/yagmail/__init__.py +8 -3
  98. alita_sdk/tools/zephyr/__init__.py +8 -7
  99. alita_sdk/tools/zephyr_enterprise/__init__.py +10 -8
  100. alita_sdk/tools/zephyr_essential/__init__.py +9 -8
  101. alita_sdk/tools/zephyr_scale/__init__.py +9 -8
  102. alita_sdk/tools/zephyr_squad/__init__.py +9 -8
  103. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/METADATA +1 -1
  104. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/RECORD +108 -105
  105. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/WHEEL +0 -0
  106. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/entry_points.txt +0 -0
  107. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/licenses/LICENSE +0 -0
  108. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/top_level.txt +0 -0
@@ -1,7 +1,7 @@
 import asyncio
 import logging
 from traceback import format_exc
-from typing import Any, Optional, List, Union
+from typing import Any, Optional, List, Union, Literal

 from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
 from langchain_core.runnables import RunnableConfig
@@ -174,26 +174,36 @@ class LLMNode(BaseTool):
                    for key, value in (self.structured_output_dict or {}).items()
                }
                # Add default output field for proper response to user
-               struct_params['elitea_response'] = {'description': 'final output to user', 'type': 'str'}
+               struct_params['elitea_response'] = {
+                   'description': 'final output to user (summarized output from LLM)', 'type': 'str',
+                   "default": None}
                struct_model = create_pydantic_model(f"LLMOutput", struct_params)
-               completion = llm_client.invoke(messages, config=config)
-               if hasattr(completion, 'tool_calls') and completion.tool_calls:
+               initial_completion = llm_client.invoke(messages, config=config)
+               if hasattr(initial_completion, 'tool_calls') and initial_completion.tool_calls:
                    new_messages, _ = self._run_async_in_sync_context(
-                       self.__perform_tool_calling(completion, messages, llm_client, config)
+                       self.__perform_tool_calling(initial_completion, messages, llm_client, config)
                    )
                    llm = self.__get_struct_output_model(llm_client, struct_model)
                    completion = llm.invoke(new_messages, config=config)
                    result = completion.model_dump()
                else:
-                   llm = self.__get_struct_output_model(llm_client, struct_model)
-                   completion = llm.invoke(messages, config=config)
+                   try:
+                       llm = self.__get_struct_output_model(llm_client, struct_model)
+                       completion = llm.invoke(messages, config=config)
+                   except ValueError as e:
+                       logger.error(f"Error invoking structured output model: {format_exc()}")
+                       logger.info("Attemping to fall back to json mode")
+                       # Fallback to regular LLM with JSON extraction
+                       completion = self.__get_struct_output_model(llm_client, struct_model,
+                                                                   method="json_mode").invoke(messages, config=config)
                    result = completion.model_dump()

                # Ensure messages are properly formatted
                if result.get('messages') and isinstance(result['messages'], list):
                    result['messages'] = [{'role': 'assistant', 'content': '\n'.join(result['messages'])}]
                else:
-                   result['messages'] = messages + [AIMessage(content=result.get(ELITEA_RS, ''))]
+                   result['messages'] = messages + [
+                       AIMessage(content=result.get(ELITEA_RS, '') or initial_completion.content)]

                return result
            else:
@@ -650,5 +660,5 @@ class LLMNode(BaseTool):

        return new_messages, current_completion

-   def __get_struct_output_model(self, llm_client, pydantic_model):
-       return llm_client.with_structured_output(pydantic_model)
+   def __get_struct_output_model(self, llm_client, pydantic_model, method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema"):
+       return llm_client.with_structured_output(pydantic_model, method=method)
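For context on the fallback introduced above: LangChain chat models expose with_structured_output(schema, method=...), but which method values ("function_calling", "json_mode", "json_schema") are honored is provider-dependent. A minimal sketch of the try/except pattern, assuming langchain-openai's ChatOpenAI and a hypothetical LLMOutput model standing in for the dynamically built struct_model:

from typing import Optional
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI  # assumption: an OpenAI-backed chat model

class LLMOutput(BaseModel):
    # Hypothetical stand-in for the model built by create_pydantic_model()
    elitea_response: Optional[str] = Field(default=None, description="final output to user")

llm_client = ChatOpenAI(model="gpt-4o-mini")
messages = [("user", "Summarize this release in one sentence, as JSON.")]

try:
    # Preferred path: strict JSON-schema structured output
    structured = llm_client.with_structured_output(LLMOutput, method="json_schema")
    completion = structured.invoke(messages)
except ValueError:
    # Fallback path from the hunk: plain JSON mode, still validated into the model
    structured = llm_client.with_structured_output(LLMOutput, method="json_mode")
    completion = structured.invoke(messages)

print(completion.model_dump())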
@@ -99,7 +99,6 @@ class McpRemoteTool(McpServerTool):

    async def _execute_remote_tool(self, kwargs: Dict[str, Any]) -> str:
        """Execute the actual remote MCP tool call using SSE client."""
-       from ...tools.utils import TOOLKIT_SPLITTER

        # Check for session_id requirement
        if not self.session_id:
@@ -109,8 +108,8 @@
        # Use the original tool name from discovery for MCP server invocation
        tool_name_for_server = self.original_tool_name
        if not tool_name_for_server:
-           tool_name_for_server = self.name.rsplit(TOOLKIT_SPLITTER, 1)[-1] if TOOLKIT_SPLITTER in self.name else self.name
-           logger.warning(f"original_tool_name not set for '{self.name}', using extracted: {tool_name_for_server}")
+           tool_name_for_server = self.name
+           logger.warning(f"original_tool_name not set for '{self.name}', using: {tool_name_for_server}")

        logger.info(f"[MCP] Executing tool '{tool_name_for_server}' with session {self.session_id}")

@@ -5,8 +5,6 @@ from typing import Any, Type, Literal, Optional, Union, List
 from langchain_core.tools import BaseTool
 from pydantic import BaseModel, Field, create_model, EmailStr, constr, ConfigDict

-from ...tools.utils import TOOLKIT_SPLITTER
-
 logger = getLogger(__name__)


@@ -91,13 +89,13 @@ class McpServerTool(BaseTool):
        return create_model(model_name, **fields)

    def _run(self, *args, **kwargs):
-       # Extract the actual tool/prompt name (remove toolkit prefix)
+       # Use the tool name directly (no prefix extraction needed)
        call_data = {
            "server": self.server,
            "tool_timeout_sec": self.tool_timeout_sec,
            "tool_call_id": str(uuid.uuid4()),
            "params": {
-               "name": self.name.rsplit(TOOLKIT_SPLITTER)[1] if TOOLKIT_SPLITTER in self.name else self.name,
+               "name": self.name,
                "arguments": kwargs
            }
        }
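Illustration of the effect on the payload sent to the MCP server; the field names mirror call_data above, while the server URL and arguments are made up:

import uuid

call_data = {
    "server": "https://mcp.example.com/sse",   # placeholder server URL
    "tool_timeout_sec": 60,
    "tool_call_id": str(uuid.uuid4()),
    "params": {
        "name": "list_issues",                 # previously e.g. "my_toolkit___list_issues" minus the prefix
        "arguments": {"project": "DEMO"},
    },
}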
@@ -128,10 +128,39 @@ class AlitaStreamlitCallback(BaseCallbackHandler):

        tool_name = args[0].get("name")
        tool_run_id = str(run_id)
+
+       # Extract metadata from tool if available (from BaseAction.metadata)
+       # Try multiple sources for metadata with toolkit_name
+       tool_meta = args[0].copy()
+
+       # Source 1: kwargs['serialized']['metadata'] - LangChain's full tool serialization
+       if 'serialized' in kwargs and 'metadata' in kwargs['serialized']:
+           tool_meta['metadata'] = kwargs['serialized']['metadata']
+           log.info(f"[METADATA] Extracted from serialized: {kwargs['serialized']['metadata']}")
+       # Source 2: Check if metadata is directly in args[0] (some LangChain versions)
+       elif 'metadata' in args[0]:
+           tool_meta['metadata'] = args[0]['metadata']
+           log.info(f"[METADATA] Extracted from args[0]: {args[0]['metadata']}")
+       else:
+           log.info(f"[METADATA] No metadata found. args[0] keys: {list(args[0].keys())}, kwargs keys: {list(kwargs.keys())}")
+           # Fallback: Try to extract toolkit_name from description
+           description = args[0].get('description', '')
+           if description:
+               import re
+               # Try pattern 1: [Toolkit: name]
+               match = re.search(r'\[Toolkit:\s*([^\]]+)\]', description)
+               if not match:
+                   # Try pattern 2: Toolkit: name at start or end
+                   match = re.search(r'(?:^|\n)Toolkit:\s*([^\n]+)', description)
+               if match:
+                   toolkit_name = match.group(1).strip()
+                   tool_meta['metadata'] = {'toolkit_name': toolkit_name}
+                   log.info(f"[METADATA] Extracted toolkit_name from description: {toolkit_name}")
+
        payload = {
            "tool_name": tool_name,
            "tool_run_id": tool_run_id,
-           "tool_meta": args[0],
+           "tool_meta": tool_meta,
            "tool_inputs": kwargs.get('inputs')
        }
        payload = json.loads(json.dumps(payload, ensure_ascii=False, default=lambda o: str(o)))
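A small self-contained sketch of the description-based fallback above; the sample description is hypothetical but follows the "Toolkit: <name>" suffix that the toolkit __init__ modules now append:

import re
from typing import Optional

def toolkit_name_from_description(description: str) -> Optional[str]:
    # Pattern 1: "[Toolkit: name]" anywhere in the description
    match = re.search(r'\[Toolkit:\s*([^\]]+)\]', description)
    if not match:
        # Pattern 2: a "Toolkit: name" line at the start or on its own line
        match = re.search(r'(?:^|\n)Toolkit:\s*([^\n]+)', description)
    return match.group(1).strip() if match else None

sample = "List repository branches.\nADO instance: https://dev.azure.com/org/project\nToolkit: my_ado_repos"
print(toolkit_name_from_description(sample))  # -> my_ado_repos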
@@ -360,6 +360,7 @@ class McpClient:
            from .mcp_oauth import (
                canonical_resource,
                extract_resource_metadata_url,
+               extract_authorization_uri,
                fetch_resource_metadata_async,
                infer_authorization_servers_from_realm,
                fetch_oauth_authorization_server_metadata
@@ -368,13 +369,39 @@ class McpClient:
            auth_header = response.headers.get('WWW-Authenticate', '')
            resource_metadata_url = extract_resource_metadata_url(auth_header, self.url)

+           # First, try authorization_uri from WWW-Authenticate header (preferred)
+           authorization_uri = extract_authorization_uri(auth_header)
+
            metadata = None
-           if resource_metadata_url:
-               metadata = await fetch_resource_metadata_async(
-                   resource_metadata_url,
-                   session=self._http_session,
-                   timeout=30
-               )
+           if authorization_uri:
+               # Fetch OAuth metadata directly from authorization_uri
+               auth_server_metadata = fetch_oauth_authorization_server_metadata(authorization_uri, timeout=30)
+               if auth_server_metadata:
+                   # Extract base authorization server URL from the issuer or the well-known URL
+                   base_auth_server = auth_server_metadata.get('issuer')
+                   if not base_auth_server and '/.well-known/' in authorization_uri:
+                       base_auth_server = authorization_uri.split('/.well-known/')[0]
+
+                   metadata = {
+                       'authorization_servers': [base_auth_server] if base_auth_server else [authorization_uri],
+                       'oauth_authorization_server': auth_server_metadata
+                   }
+
+           # Fall back to resource_metadata if authorization_uri didn't work
+           if not metadata:
+               if resource_metadata_url:
+                   metadata = await fetch_resource_metadata_async(
+                       resource_metadata_url,
+                       session=self._http_session,
+                       timeout=30
+                   )
+               # If we got resource_metadata, also fetch oauth_authorization_server
+               if metadata and metadata.get('authorization_servers'):
+                   auth_server_metadata = fetch_oauth_authorization_server_metadata(
+                       metadata['authorization_servers'][0], timeout=30
+                   )
+                   if auth_server_metadata:
+                       metadata['oauth_authorization_server'] = auth_server_metadata

            # Infer authorization servers if not in metadata
            if not metadata or not metadata.get('authorization_servers'):
@@ -43,6 +43,23 @@ class McpAuthorizationRequired(ToolException):
        }


+def extract_authorization_uri(www_authenticate: Optional[str]) -> Optional[str]:
+    """
+    Extract authorization_uri from WWW-Authenticate header.
+    This points directly to the OAuth authorization server metadata URL.
+    Should be used before falling back to resource_metadata.
+    """
+    if not www_authenticate:
+        return None
+
+    # Look for authorization_uri="<url>" in the header
+    match = re.search(r'authorization_uri\s*=\s*\"?([^\", ]+)\"?', www_authenticate)
+    if match:
+        return match.group(1)
+
+    return None
+
+
 def extract_resource_metadata_url(www_authenticate: Optional[str], server_url: Optional[str] = None) -> Optional[str]:
    """
    Pull the resource_metadata URL from a WWW-Authenticate header if present.
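For illustration, the same regex applied to a hypothetical WWW-Authenticate value (real header contents vary by provider):

import re
from typing import Optional

def extract_authorization_uri(www_authenticate: Optional[str]) -> Optional[str]:
    # Mirrors the helper added above
    if not www_authenticate:
        return None
    match = re.search(r'authorization_uri\s*=\s*\"?([^\", ]+)\"?', www_authenticate)
    return match.group(1) if match else None

header = ('Bearer resource_metadata="https://mcp.example.com/.well-known/oauth-protected-resource", '
          'authorization_uri="https://auth.example.com/.well-known/oauth-authorization-server"')
print(extract_authorization_uri(header))
# -> https://auth.example.com/.well-known/oauth-authorization-server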
@@ -62,15 +79,33 @@ def extract_resource_metadata_url(www_authenticate: Optional[str], server_url: O
    # or using well-known OAuth discovery endpoints directly
    return None

-
-def fetch_oauth_authorization_server_metadata(base_url: str, timeout: int = 10) -> Optional[Dict[str, Any]]:
+def fetch_oauth_authorization_server_metadata(url: str, timeout: int = 10) -> Optional[Dict[str, Any]]:
    """
    Fetch OAuth authorization server metadata from well-known endpoints.
-   Tries both oauth-authorization-server and openid-configuration discovery endpoints.
+
+   Args:
+       url: Either a full well-known URL (e.g., https://api.figma.com/.well-known/oauth-authorization-server)
+            or a base URL (e.g., https://api.figma.com) where we'll try discovery endpoints.
+       timeout: Request timeout in seconds.
+
+   Returns:
+       OAuth authorization server metadata dict, or None if not found.
    """
+   # If the URL is already a .well-known endpoint, try it directly first
+   if '/.well-known/' in url:
+       try:
+           resp = requests.get(url, timeout=timeout)
+           if resp.status_code == 200:
+               return resp.json()
+       except Exception as exc:
+           logger.debug(f"Failed to fetch OAuth metadata from {url}: {exc}")
+       # If direct fetch failed, don't try other endpoints
+       return None
+
+   # Otherwise, try standard discovery endpoints
    discovery_endpoints = [
-       f"{base_url}/.well-known/oauth-authorization-server",
-       f"{base_url}/.well-known/openid-configuration",
+       f"{url}/.well-known/oauth-authorization-server",
+       f"{url}/.well-known/openid-configuration",
    ]

    for endpoint in discovery_endpoints:
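A hedged usage sketch of the reworked helper, assuming it is imported from alita_sdk.runtime.utils.mcp_oauth; both URLs are placeholders:

from alita_sdk.runtime.utils.mcp_oauth import fetch_oauth_authorization_server_metadata

# Full well-known URL, e.g. taken from authorization_uri in a WWW-Authenticate header
meta = fetch_oauth_authorization_server_metadata(
    "https://auth.example.com/.well-known/oauth-authorization-server", timeout=30)

# Base URL: the helper probes oauth-authorization-server, then openid-configuration
meta = meta or fetch_oauth_authorization_server_metadata("https://auth.example.com", timeout=30)

if meta:
    token_endpoint = meta.get("token_endpoint")  # later used for code exchange / refresh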
@@ -168,7 +203,7 @@ def exchange_oauth_token(
    token_endpoint: str,
    code: str,
    redirect_uri: str,
-   client_id: str,
+   client_id: Optional[str] = None,
    client_secret: Optional[str] = None,
    code_verifier: Optional[str] = None,
    scope: Optional[str] = None,
@@ -184,7 +219,7 @@
        token_endpoint: OAuth token endpoint URL
        code: Authorization code from OAuth provider
        redirect_uri: Redirect URI used in authorization request
-       client_id: OAuth client ID
+       client_id: OAuth client ID (optional for DCR/public clients)
        client_secret: OAuth client secret (optional for public clients)
        code_verifier: PKCE code verifier (optional)
        scope: OAuth scope (optional)
@@ -196,15 +231,22 @@
    Raises:
        requests.RequestException: If the HTTP request fails
        ValueError: If the token exchange fails
+
+   Note:
+       client_id may be optional for:
+       - Dynamic Client Registration (DCR): client_id may be in the code
+       - OIDC public clients: some providers don't require it
+       - Some MCP servers handle auth differently
    """
    # Build the token request body
    token_body = {
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": redirect_uri,
-       "client_id": client_id,
    }

+   if client_id:
+       token_body["client_id"] = client_id
    if client_secret:
        token_body["client_secret"] = client_secret
    if code_verifier:
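With client_id now optional, a public-client or DCR-style exchange can omit it; a sketch with placeholder values, assuming the helper is imported from alita_sdk.runtime.utils.mcp_oauth:

from alita_sdk.runtime.utils.mcp_oauth import exchange_oauth_token

tokens = exchange_oauth_token(
    token_endpoint="https://auth.example.com/oauth/token",   # placeholder endpoint
    code="AUTH_CODE_FROM_CALLBACK",                          # placeholder authorization code
    redirect_uri="https://app.example.com/oauth/callback",
    code_verifier="PKCE_CODE_VERIFIER",                      # client_id / client_secret omitted
)
access_token = tokens["access_token"]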
@@ -242,3 +284,78 @@
        logger.error(f"MCP OAuth: token exchange failed - {response.status_code}: {error_msg}")
        raise ValueError(f"Token exchange failed: {error_msg}")

+
+def refresh_oauth_token(
+    token_endpoint: str,
+    refresh_token: str,
+    client_id: Optional[str] = None,
+    client_secret: Optional[str] = None,
+    scope: Optional[str] = None,
+    timeout: int = 30,
+) -> Dict[str, Any]:
+    """
+    Refresh an OAuth access token using a refresh token.
+
+    Args:
+        token_endpoint: OAuth token endpoint URL
+        refresh_token: Refresh token from previous authorization
+        client_id: OAuth client ID (optional for DCR/public clients)
+        client_secret: OAuth client secret (optional for public clients)
+        scope: OAuth scope (optional)
+        timeout: Request timeout in seconds
+
+    Returns:
+        Token response from OAuth provider containing access_token, etc.
+        May also include a new refresh_token depending on the provider.
+
+    Raises:
+        requests.RequestException: If the HTTP request fails
+        ValueError: If the token refresh fails
+
+    Note:
+        client_id may be optional for:
+        - Dynamic Client Registration (DCR): client_id embedded in refresh_token
+        - OIDC public clients: some providers don't require it
+        - Some MCP servers handle auth differently
+    """
+    token_body = {
+        "grant_type": "refresh_token",
+        "refresh_token": refresh_token,
+    }
+
+    if client_id:
+        token_body["client_id"] = client_id
+    if client_secret:
+        token_body["client_secret"] = client_secret
+    if scope:
+        token_body["scope"] = scope
+
+    logger.info(f"MCP OAuth: refreshing token at {token_endpoint}")
+
+    response = requests.post(
+        token_endpoint,
+        data=token_body,
+        headers={
+            "Content-Type": "application/x-www-form-urlencoded",
+            "Accept": "application/json",
+        },
+        timeout=timeout
+    )
+
+    # Try to parse as JSON
+    try:
+        token_data = response.json()
+    except Exception:
+        # Some providers return URL-encoded response
+        from urllib.parse import parse_qs
+        token_data = {k: v[0] if len(v) == 1 else v
+                      for k, v in parse_qs(response.text).items()}
+
+    if response.ok:
+        logger.info("MCP OAuth: token refresh successful")
+        return token_data
+    else:
+        error_msg = token_data.get("error_description") or token_data.get("error") or response.text
+        logger.error(f"MCP OAuth: token refresh failed - {response.status_code}: {error_msg}")
+        raise ValueError(f"Token refresh failed: {error_msg}")
+
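A usage sketch for the new refresh helper (placeholder endpoint and token; some providers rotate the refresh token, so the returned one should be persisted):

from alita_sdk.runtime.utils.mcp_oauth import refresh_oauth_token

new_tokens = refresh_oauth_token(
    token_endpoint="https://auth.example.com/oauth/token",   # placeholder endpoint
    refresh_token="STORED_REFRESH_TOKEN",                    # placeholder token
)
access_token = new_tokens["access_token"]
# Keep the rotated refresh token if the provider returned one
refresh_token = new_tokens.get("refresh_token", "STORED_REFRESH_TOKEN")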
@@ -71,6 +71,7 @@ class McpSseClient:
            McpAuthorizationRequired,
            canonical_resource,
            extract_resource_metadata_url,
+           extract_authorization_uri,
            fetch_resource_metadata_async,
            infer_authorization_servers_from_realm,
            fetch_oauth_authorization_server_metadata
@@ -79,13 +80,41 @@ class McpSseClient:
        auth_header = self._stream_response.headers.get('WWW-Authenticate', '')
        resource_metadata_url = extract_resource_metadata_url(auth_header, self.url)

+       # First, try authorization_uri from WWW-Authenticate header (preferred)
+       authorization_uri = extract_authorization_uri(auth_header)
+
        metadata = None
-       if resource_metadata_url:
-           metadata = await fetch_resource_metadata_async(
-               resource_metadata_url,
-               session=self._stream_session,
-               timeout=30
-           )
+       if authorization_uri:
+           # Fetch OAuth metadata directly from authorization_uri
+           auth_server_metadata = fetch_oauth_authorization_server_metadata(authorization_uri, timeout=30)
+           if auth_server_metadata:
+               # Extract base authorization server URL from the issuer or the well-known URL
+               base_auth_server = auth_server_metadata.get('issuer')
+               if not base_auth_server and '/.well-known/' in authorization_uri:
+                   base_auth_server = authorization_uri.split('/.well-known/')[0]
+
+               metadata = {
+                   'authorization_servers': [base_auth_server] if base_auth_server else [authorization_uri],
+                   'oauth_authorization_server': auth_server_metadata
+               }
+               logger.info(f"[MCP SSE Client] Using authorization_uri: {authorization_uri}, base: {base_auth_server}")
+
+       # Fall back to resource_metadata if authorization_uri didn't work
+       if not metadata:
+           if resource_metadata_url:
+               metadata = await fetch_resource_metadata_async(
+                   resource_metadata_url,
+                   session=self._stream_session,
+                   timeout=30
+               )
+           # If we got resource_metadata, also fetch oauth_authorization_server
+           if metadata and metadata.get('authorization_servers'):
+               auth_server_metadata = fetch_oauth_authorization_server_metadata(
+                   metadata['authorization_servers'][0], timeout=30
+               )
+               if auth_server_metadata:
+                   metadata['oauth_authorization_server'] = auth_server_metadata
+                   logger.info(f"[MCP SSE Client] Fetched OAuth metadata from resource_metadata")

        # Infer authorization servers if not in metadata
        if not metadata or not metadata.get('authorization_servers'):
@@ -1,6 +1,8 @@
 import re
 from enum import Enum

+# DEPRECATED: Tool names no longer use prefixes
+# Kept for backward compatibility only
 TOOLKIT_SPLITTER = "___"

 class IndexerKeywords(Enum):
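In practice the change looks like this (names are illustrative): the ___ prefix disappears from tool names and the toolkit association moves into BaseAction metadata instead.

TOOLKIT_SPLITTER = "___"   # still exported, but no longer applied to tool names

# Before 0.3.515: toolkit name encoded in the tool name itself
old_name = f"my_ado_repos{TOOLKIT_SPLITTER}list_branches"   # "my_ado_repos___list_branches"

# From 0.3.515 on: bare tool name, toolkit carried separately
new_name = "list_branches"
new_metadata = {"toolkit_name": "my_ado_repos"}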
@@ -45,6 +45,9 @@ def _safe_import_tool(tool_name, module_path, get_tools_name=None, toolkit_class
        imported = {}
        if get_tools_name and hasattr(module, get_tools_name):
            imported['get_tools'] = getattr(module, get_tools_name)
+
+       if hasattr(module, 'get_toolkit'):
+           imported['get_toolkit'] = getattr(module, 'get_toolkit')

        if toolkit_class_name and hasattr(module, toolkit_class_name):
            imported['toolkit_class'] = getattr(module, toolkit_class_name)
@@ -209,6 +212,18 @@ def get_toolkits():
    logger.info(f"Successfully loaded {len(toolkit_configs)} toolkit configurations")
    return toolkit_configs

+def instantiate_toolkit(tool_config):
+    """Instantiate a toolkit from its configuration."""
+    tool_type = tool_config.get('type')
+
+    if tool_type in AVAILABLE_TOOLS:
+        tool_module = AVAILABLE_TOOLS[tool_type]
+
+        if 'get_toolkit' in tool_module:
+            return tool_module['get_toolkit'](tool_config)
+
+    raise ValueError(f"Toolkit type '{tool_type}' does not support direct instantiation or is not available.")
+
 def get_available_tools():
    """Return list of available tool types."""
    return list(AVAILABLE_TOOLS.keys())
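A hedged usage sketch for the new instantiate_toolkit helper, assuming it is exported from alita_sdk.tools where the hunk adds it; the config shape mirrors what the per-toolkit get_toolkit(tool) functions read, and all concrete values are placeholders:

from alita_sdk.tools import instantiate_toolkit, get_available_tools

tool_config = {
    "type": "ado_repos",                 # must be one of get_available_tools()
    "toolkit_name": "my_ado_repos",
    "settings": {
        "selected_tools": [],            # empty -> expose all tools
        "ado_repos_configuration": {},   # placeholder: real ADO credentials/config required
    },
}

toolkit = instantiate_toolkit(tool_config)   # raises ValueError for unsupported types
tools = toolkit.get_tools()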
@@ -10,12 +10,12 @@ from ....configurations.ado import AdoReposConfiguration
 from ....configurations.pgvector import PgVectorConfiguration
 from ...base.tool import BaseAction
 from .repos_wrapper import ReposApiWrapper
-from ...utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length, check_connection_response
+from ...utils import clean_string, get_max_toolkit_length, check_connection_response

 name = "ado_repos"


-def _get_toolkit(tool) -> BaseToolkit:
+def get_toolkit(tool) -> BaseToolkit:
    return AzureDevOpsReposToolkit().get_toolkit(
        selected_tools=tool['settings'].get('selected_tools', []),
        ado_repos_configuration=tool['settings']['ado_repos_configuration'],
@@ -30,20 +30,15 @@ def _get_toolkit(tool) -> BaseToolkit:
        llm=tool['settings'].get('llm', None),
    )

-def get_toolkit():
-    return AzureDevOpsReposToolkit.toolkit_config_schema()
-
 def get_tools(tool):
-    return _get_toolkit(tool).get_tools()
+    return get_toolkit(tool).get_tools()

 class AzureDevOpsReposToolkit(BaseToolkit):
    tools: List[BaseTool] = []
-   toolkit_max_length: int = 0

    @staticmethod
    def toolkit_config_schema() -> BaseModel:
        selected_tools = {x['name']: x['args_schema'].schema() for x in ReposApiWrapper.model_construct().get_available_tools()}
-       AzureDevOpsReposToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
        m = create_model(
            name,
            ado_repos_configuration=(AdoReposConfiguration, Field(description="Ado Repos configuration", default=None,
@@ -63,7 +58,6 @@ class AzureDevOpsReposToolkit(BaseToolkit):
                "icon_url": "ado-repos-icon.svg",
                "categories": ["code repositories"],
                "extra_categories": ["code", "repository", "version control"],
-               "max_length": AzureDevOpsReposToolkit.toolkit_max_length
            }}}
        )

@@ -99,17 +93,21 @@ class AzureDevOpsReposToolkit(BaseToolkit):
        azure_devops_repos_wrapper = ReposApiWrapper(**wrapper_payload)
        available_tools = azure_devops_repos_wrapper.get_available_tools()
        tools = []
-       prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
        for tool in available_tools:
            if selected_tools:
                if tool["name"] not in selected_tools:
                    continue
+           description = tool["description"] + f"\nADO instance: {azure_devops_repos_wrapper.organization_url}/{azure_devops_repos_wrapper.project}"
+           if toolkit_name:
+               description = f"{description}\nToolkit: {toolkit_name}"
+           description = description[:1000]
            tools.append(
                BaseAction(
                    api_wrapper=azure_devops_repos_wrapper,
-                   name=prefix + tool["name"],
-                   description=tool["description"] + f"\nADO instance: {azure_devops_repos_wrapper.organization_url}/{azure_devops_repos_wrapper.project}",
+                   name=tool["name"],
+                   description=description,
                    args_schema=tool["args_schema"],
+                   metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
                )
            )
        return cls(tools=tools)
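The resulting tool description, sketched with placeholder values; the 1000-character cap bounds what is handed to the LLM as the tool description:

base_description = "List branches in the repository."   # from the wrapper's tool definition
organization_url = "https://dev.azure.com/my-org"        # placeholder
project = "my-project"                                    # placeholder
toolkit_name = "my_ado_repos"

description = base_description + f"\nADO instance: {organization_url}/{project}"
if toolkit_name:
    description = f"{description}\nToolkit: {toolkit_name}"
description = description[:1000]   # hard cap applied in the hunk above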
@@ -10,21 +10,33 @@ from ....configurations.ado import AdoConfiguration
 from ....configurations.pgvector import PgVectorConfiguration
 from .test_plan_wrapper import TestPlanApiWrapper
 from ...base.tool import BaseAction
-from ...utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length, check_connection_response
+from ...utils import clean_string, get_max_toolkit_length, check_connection_response


 name = "azure_devops_plans"
 name_alias = "ado_plans"

+def get_toolkit(tool):
+    return AzureDevOpsPlansToolkit().get_toolkit(
+        selected_tools=tool['settings'].get('selected_tools', []),
+        ado_configuration=tool['settings']['ado_configuration'],
+        limit=tool['settings'].get('limit', 5),
+        toolkit_name=tool.get('toolkit_name', ''),
+        alita=tool['settings'].get('alita', None),
+        llm=tool['settings'].get('llm', None),
+        pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+        collection_name=tool['toolkit_name'],
+        doctype='doc',
+        embedding_model=tool['settings'].get('embedding_model'),
+        vectorstore_type="PGVector"
+    )

 class AzureDevOpsPlansToolkit(BaseToolkit):
    tools: List[BaseTool] = []
-   toolkit_max_length: int = 0

    @staticmethod
    def toolkit_config_schema() -> BaseModel:
        selected_tools = {x['name']: x['args_schema'].schema() for x in TestPlanApiWrapper.model_construct().get_available_tools()}
-       AzureDevOpsPlansToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
        m = create_model(
            name_alias,
            ado_configuration=(AdoConfiguration, Field(description="Ado configuration", json_schema_extra={'configuration_types': ['ado']})),
@@ -39,7 +51,6 @@ class AzureDevOpsPlansToolkit(BaseToolkit):
            {
                "label": "ADO plans",
                "icon_url": "ado-plans.svg",
-               "max_length": AzureDevOpsPlansToolkit.toolkit_max_length,
                "categories": ["test management"],
                "extra_categories": ["test case management", "qa"],
                "sections": {
@@ -97,17 +108,21 @@ class AzureDevOpsPlansToolkit(BaseToolkit):
        azure_devops_api_wrapper = TestPlanApiWrapper(**wrapper_payload)
        available_tools = azure_devops_api_wrapper.get_available_tools()
        tools = []
-       prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
        for tool in available_tools:
            if selected_tools:
                if tool["name"] not in selected_tools:
                    continue
            print(tool)
+           description = tool["description"] + f"\nADO instance: {azure_devops_api_wrapper.organization_url}"
+           if toolkit_name:
+               description = f"{description}\nToolkit: {toolkit_name}"
+           description = description[:1000]
            tools.append(BaseAction(
                api_wrapper=azure_devops_api_wrapper,
-               name=prefix + tool["name"],
-               description=tool["description"] + f"\nADO instance: {azure_devops_api_wrapper.organization_url}",
-               args_schema=tool["args_schema"]
+               name=tool["name"],
+               description=description,
+               args_schema=tool["args_schema"],
+               metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
            ))
        return cls(tools=tools)