alita-sdk 0.3.497__py3-none-any.whl → 0.3.516__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of alita-sdk might be problematic.

Files changed (109)
  1. alita_sdk/cli/inventory.py +12 -195
  2. alita_sdk/community/inventory/__init__.py +12 -0
  3. alita_sdk/community/inventory/toolkit.py +9 -5
  4. alita_sdk/community/inventory/toolkit_utils.py +176 -0
  5. alita_sdk/configurations/ado.py +144 -0
  6. alita_sdk/configurations/confluence.py +76 -42
  7. alita_sdk/configurations/figma.py +76 -0
  8. alita_sdk/configurations/gitlab.py +2 -0
  9. alita_sdk/configurations/qtest.py +72 -1
  10. alita_sdk/configurations/report_portal.py +96 -0
  11. alita_sdk/configurations/sharepoint.py +148 -0
  12. alita_sdk/configurations/testio.py +83 -0
  13. alita_sdk/runtime/clients/artifact.py +2 -2
  14. alita_sdk/runtime/clients/client.py +24 -19
  15. alita_sdk/runtime/clients/sandbox_client.py +14 -0
  16. alita_sdk/runtime/langchain/assistant.py +64 -23
  17. alita_sdk/runtime/langchain/constants.py +270 -1
  18. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
  19. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +2 -1
  20. alita_sdk/runtime/langchain/document_loaders/constants.py +2 -1
  21. alita_sdk/runtime/langchain/langraph_agent.py +8 -9
  22. alita_sdk/runtime/langchain/utils.py +6 -1
  23. alita_sdk/runtime/toolkits/artifact.py +14 -5
  24. alita_sdk/runtime/toolkits/datasource.py +13 -6
  25. alita_sdk/runtime/toolkits/mcp.py +26 -157
  26. alita_sdk/runtime/toolkits/planning.py +10 -5
  27. alita_sdk/runtime/toolkits/tools.py +23 -7
  28. alita_sdk/runtime/toolkits/vectorstore.py +11 -5
  29. alita_sdk/runtime/tools/artifact.py +139 -6
  30. alita_sdk/runtime/tools/llm.py +20 -10
  31. alita_sdk/runtime/tools/mcp_remote_tool.py +2 -3
  32. alita_sdk/runtime/tools/mcp_server_tool.py +2 -4
  33. alita_sdk/runtime/utils/AlitaCallback.py +30 -1
  34. alita_sdk/runtime/utils/mcp_client.py +33 -6
  35. alita_sdk/runtime/utils/mcp_oauth.py +125 -8
  36. alita_sdk/runtime/utils/mcp_sse_client.py +35 -6
  37. alita_sdk/runtime/utils/utils.py +2 -0
  38. alita_sdk/tools/__init__.py +15 -0
  39. alita_sdk/tools/ado/repos/__init__.py +10 -12
  40. alita_sdk/tools/ado/test_plan/__init__.py +23 -8
  41. alita_sdk/tools/ado/wiki/__init__.py +24 -8
  42. alita_sdk/tools/ado/wiki/ado_wrapper.py +21 -7
  43. alita_sdk/tools/ado/work_item/__init__.py +24 -8
  44. alita_sdk/tools/advanced_jira_mining/__init__.py +10 -8
  45. alita_sdk/tools/aws/delta_lake/__init__.py +12 -9
  46. alita_sdk/tools/aws/delta_lake/tool.py +5 -1
  47. alita_sdk/tools/azure_ai/search/__init__.py +9 -7
  48. alita_sdk/tools/base/tool.py +5 -1
  49. alita_sdk/tools/base_indexer_toolkit.py +25 -0
  50. alita_sdk/tools/bitbucket/__init__.py +14 -10
  51. alita_sdk/tools/bitbucket/api_wrapper.py +50 -2
  52. alita_sdk/tools/browser/__init__.py +5 -4
  53. alita_sdk/tools/carrier/__init__.py +5 -6
  54. alita_sdk/tools/cloud/aws/__init__.py +9 -7
  55. alita_sdk/tools/cloud/azure/__init__.py +9 -7
  56. alita_sdk/tools/cloud/gcp/__init__.py +9 -7
  57. alita_sdk/tools/cloud/k8s/__init__.py +9 -7
  58. alita_sdk/tools/code/linter/__init__.py +9 -8
  59. alita_sdk/tools/code/sonar/__init__.py +9 -7
  60. alita_sdk/tools/confluence/__init__.py +15 -10
  61. alita_sdk/tools/custom_open_api/__init__.py +11 -5
  62. alita_sdk/tools/elastic/__init__.py +10 -8
  63. alita_sdk/tools/elitea_base.py +387 -9
  64. alita_sdk/tools/figma/__init__.py +8 -7
  65. alita_sdk/tools/github/__init__.py +12 -14
  66. alita_sdk/tools/github/github_client.py +68 -2
  67. alita_sdk/tools/github/tool.py +5 -1
  68. alita_sdk/tools/gitlab/__init__.py +14 -11
  69. alita_sdk/tools/gitlab/api_wrapper.py +81 -1
  70. alita_sdk/tools/gitlab_org/__init__.py +9 -8
  71. alita_sdk/tools/google/bigquery/__init__.py +12 -12
  72. alita_sdk/tools/google/bigquery/tool.py +5 -1
  73. alita_sdk/tools/google_places/__init__.py +9 -8
  74. alita_sdk/tools/jira/__init__.py +15 -10
  75. alita_sdk/tools/keycloak/__init__.py +10 -8
  76. alita_sdk/tools/localgit/__init__.py +8 -3
  77. alita_sdk/tools/localgit/local_git.py +62 -54
  78. alita_sdk/tools/localgit/tool.py +5 -1
  79. alita_sdk/tools/memory/__init__.py +11 -3
  80. alita_sdk/tools/ocr/__init__.py +10 -8
  81. alita_sdk/tools/openapi/__init__.py +6 -2
  82. alita_sdk/tools/pandas/__init__.py +9 -7
  83. alita_sdk/tools/postman/__init__.py +10 -11
  84. alita_sdk/tools/pptx/__init__.py +9 -9
  85. alita_sdk/tools/qtest/__init__.py +9 -8
  86. alita_sdk/tools/rally/__init__.py +9 -8
  87. alita_sdk/tools/report_portal/__init__.py +11 -9
  88. alita_sdk/tools/salesforce/__init__.py +9 -9
  89. alita_sdk/tools/servicenow/__init__.py +10 -8
  90. alita_sdk/tools/sharepoint/__init__.py +9 -8
  91. alita_sdk/tools/slack/__init__.py +8 -7
  92. alita_sdk/tools/sql/__init__.py +9 -8
  93. alita_sdk/tools/testio/__init__.py +9 -8
  94. alita_sdk/tools/testrail/__init__.py +10 -8
  95. alita_sdk/tools/utils/__init__.py +9 -4
  96. alita_sdk/tools/utils/text_operations.py +254 -0
  97. alita_sdk/tools/xray/__init__.py +10 -8
  98. alita_sdk/tools/yagmail/__init__.py +8 -3
  99. alita_sdk/tools/zephyr/__init__.py +8 -7
  100. alita_sdk/tools/zephyr_enterprise/__init__.py +10 -8
  101. alita_sdk/tools/zephyr_essential/__init__.py +9 -8
  102. alita_sdk/tools/zephyr_scale/__init__.py +9 -8
  103. alita_sdk/tools/zephyr_squad/__init__.py +9 -8
  104. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/METADATA +1 -1
  105. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/RECORD +109 -106
  106. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/WHEEL +0 -0
  107. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/entry_points.txt +0 -0
  108. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/licenses/LICENSE +0 -0
  109. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/toolkits/tools.py

@@ -21,7 +21,7 @@ from ..tools.image_generation import ImageGenerationToolkit
 from ...community import get_toolkits as community_toolkits, get_tools as community_tools
 from ...tools.memory import MemoryToolkit
 from ..utils.mcp_oauth import canonical_resource, McpAuthorizationRequired
-from ...tools.utils import TOOLKIT_SPLITTER
+from ...tools.utils import clean_string
 from alita_sdk.tools import _inject_toolkit_id
 
 logger = logging.getLogger(__name__)
@@ -41,7 +41,7 @@ def get_toolkits():
     return core_toolkits + community_toolkits() + alita_toolkits()
 
 
-def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseStore = None, debug_mode: Optional[bool] = False, mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None) -> list:
+def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseStore = None, debug_mode: Optional[bool] = False, mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None, ignored_mcp_servers: Optional[list] = None) -> list:
     prompts = []
     tools = []
 
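A minimal caller sketch for the new `ignored_mcp_servers` parameter, assuming `get_tools` is imported from this module; `tools_config`, `client`, and `llm` are illustrative placeholders, not SDK fixtures:

```python
from alita_sdk.runtime.toolkits.tools import get_tools

tools = get_tools(
    tools_list=tools_config,   # e.g. [{'type': 'mcp', 'settings': {'url': ...}}, ...]
    alita_client=client,
    llm=llm,
    # Raw or canonical server URLs the user chose to continue without authorizing:
    ignored_mcp_servers=["https://mcp.example.com/sse"],
)
```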
@@ -110,10 +110,11 @@ def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseS
                 pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
                 embedding_model=tool['settings'].get('embedding_model'),
                 collection_name=f"{tool.get('toolkit_name')}",
-                collection_schema=str(tool['id']),
+                collection_schema=str(tool['settings'].get('id', tool.get('id', ''))),
             ).get_tools()
             # Inject toolkit_id for artifact tools as well
-            _inject_toolkit_id(tool, toolkit_tools)
+            # Pass settings as the tool config since that's where the id field is
+            _inject_toolkit_id(tool['settings'], toolkit_tools)
             tools.extend(toolkit_tools)
 
         elif tool['type'] == 'vectorstore':
@@ -164,6 +165,14 @@ def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseS
             # remote mcp tool initialization with token injection
             settings = dict(tool['settings'])
             url = settings.get('url')
+
+            # Check if this MCP server should be ignored (user chose to continue without auth)
+            if ignored_mcp_servers and url:
+                canonical_url = canonical_resource(url)
+                if canonical_url in ignored_mcp_servers or url in ignored_mcp_servers:
+                    logger.info(f"[MCP Auth] Skipping ignored MCP server: {url}")
+                    continue
+
             headers = settings.get('headers')
             token_data = None
             session_id = None
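The ignore check compares both the raw URL and its canonical form against the list. `canonical_resource` is defined in `mcp_oauth` and is not excerpted in this diff; the sketch below only illustrates the kind of normalization such a comparison relies on, not the SDK's implementation:

```python
from urllib.parse import urlsplit

def canonical_resource_sketch(url: str) -> str:
    # Hypothetical normalization: lowercase host, drop trailing slash,
    # so equivalent spellings of the same server compare equal.
    parts = urlsplit(url)
    return f"{parts.scheme}://{parts.netloc.lower()}{parts.path.rstrip('/')}"

assert canonical_resource_sketch("https://MCP.Example.com/sse/") == "https://mcp.example.com/sse"
```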
@@ -319,11 +328,18 @@ def _mcp_tools(tools_list, alita):
 
 def _init_single_mcp_tool(server_toolkit_name, toolkit_name, available_tool, alita, toolkit_settings):
     try:
-
-        tool_name = f'{toolkit_name}{TOOLKIT_SPLITTER}{available_tool["name"]}'
+        # Use clean tool name without prefix
+        tool_name = available_tool["name"]
+        # Add toolkit context to description (max 1000 chars)
+        toolkit_context = f" [Toolkit: {clean_string(toolkit_name)}]" if toolkit_name else ''
+        base_description = f"MCP for a tool '{tool_name}': {available_tool.get('description', '')}"
+        description = base_description
+        if toolkit_context and len(base_description + toolkit_context) <= 1000:
+            description = base_description + toolkit_context
+
         return McpServerTool(
             name=tool_name,
-            description=f"MCP for a tool '{tool_name}': {available_tool.get('description', '')}",
+            description=description,
             args_schema=McpServerTool.create_pydantic_model_from_schema(
                 available_tool.get("inputSchema", {})
             ),
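This hunk and the VectorStoreToolkit changes below share one rule: tool names lose their toolkit prefix, and the owning toolkit is instead appended to the description as `[Toolkit: name]`, but only while the combined description stays within 1000 characters. The rule in isolation (a sketch; the SDK also runs the name through `clean_string` first):

```python
def with_toolkit_context(description: str, toolkit_name: str = None, limit: int = 1000) -> str:
    """Append ' [Toolkit: <name>]' only if the combined text fits the limit."""
    if not toolkit_name:
        return description
    suffix = f" [Toolkit: {toolkit_name}]"
    return description + suffix if len(description + suffix) <= limit else description

print(with_toolkit_context("MCP for a tool 'search': full-text search", "confluence"))
# MCP for a tool 'search': full-text search [Toolkit: confluence]
```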
alita_sdk/runtime/toolkits/vectorstore.py

@@ -1,7 +1,7 @@
 from logging import getLogger
 from typing import Any, List, Literal, Optional
 
-from alita_sdk.tools.utils import clean_string, TOOLKIT_SPLITTER
+from alita_sdk.tools.utils import clean_string
 from pydantic import BaseModel, create_model, Field, ConfigDict
 from langchain_core.tools import BaseToolkit, BaseTool
 from alita_sdk.tools.base.tool import BaseAction
@@ -31,7 +31,8 @@ class VectorStoreToolkit(BaseToolkit):
                     toolkit_name: Optional[str] = None,
                     selected_tools: list[str] = []):
         logger.info("Selected tools: %s", selected_tools)
-        prefix = clean_string(toolkit_name) + TOOLKIT_SPLITTER if toolkit_name else ''
+        # Use clean toolkit name for context (max 1000 chars in description)
+        toolkit_context = f" [Toolkit: {clean_string(toolkit_name)}]" if toolkit_name else ''
         if selected_tools is None:
             selected_tools = []
         tools = []
@@ -46,11 +47,16 @@
             # if selected_tools:
             #     if tool["name"] not in selected_tools:
             #         continue
+            # Add toolkit context to description with character limit
+            description = tool["description"]
+            if toolkit_context and len(description + toolkit_context) <= 1000:
+                description = description + toolkit_context
             tools.append(BaseAction(
                 api_wrapper=vectorstore_wrapper,
-                name=f'{prefix}{tool["name"]}',
-                description=tool["description"],
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
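Because the prefix is gone, downstream consumers can no longer parse the toolkit out of the tool name; the new `metadata={'toolkit_name': ...}` field is the replacement channel (the AlitaCallback hunk further down reads it). A consumer-side sketch:

```python
def owning_toolkit(tool) -> str:
    # BaseAction now carries the toolkit in metadata instead of the name prefix.
    meta = getattr(tool, "metadata", None) or {}
    return meta.get("toolkit_name", "unknown")
```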
alita_sdk/runtime/tools/artifact.py

@@ -13,6 +13,7 @@ from pydantic import create_model, Field, model_validator
 
 from ...tools.non_code_indexer_toolkit import NonCodeIndexerToolkit
 from ...tools.utils.available_tools_decorator import extend_with_parent_available_tools
+from ...tools.elitea_base import extend_with_file_operations, BaseCodeToolApiWrapper
 from ...runtime.utils.utils import IndexerKeywords
 
 
@@ -20,6 +21,12 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
     bucket: str
     artifact: Optional[Any] = None
 
+    # Import file operation methods from BaseCodeToolApiWrapper
+    read_file_chunk = BaseCodeToolApiWrapper.read_file_chunk
+    read_multiple_files = BaseCodeToolApiWrapper.read_multiple_files
+    search_file = BaseCodeToolApiWrapper.search_file
+    edit_file = BaseCodeToolApiWrapper.edit_file
+
     @model_validator(mode='before')
     @classmethod
     def validate_toolkit(cls, values):
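Assigning functions from `BaseCodeToolApiWrapper` as class attributes grafts the shared file-operation tools onto `ArtifactWrapper` without touching its inheritance chain; `self` binds to the artifact wrapper when they are called. The pattern in a self-contained sketch (names here are generic, not SDK API):

```python
class SharedOps:
    def read_chunk(self, path: str, limit: int = 11) -> str:
        # Relies on the host class providing _read_file().
        return self._read_file(path)[:limit]

class Host:
    read_chunk = SharedOps.read_chunk  # borrowed function, no inheritance

    def _read_file(self, path: str) -> str:
        return f"contents of {path}"

print(Host().read_chunk("a.txt"))  # contents of
```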
@@ -31,7 +38,24 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
         return super().validate_toolkit(values)
 
     def list_files(self, bucket_name = None, return_as_string = True):
-        return self.artifact.list(bucket_name, return_as_string)
+        """List all files in the artifact bucket with API download links."""
+        result = self.artifact.list(bucket_name, return_as_string=False)
+
+        # Add API download link to each file
+        if isinstance(result, dict) and 'rows' in result:
+            bucket = bucket_name or self.bucket
+
+            # Get base_url and project_id from alita client
+            base_url = getattr(self.alita, 'base_url', '').rstrip('/')
+            project_id = getattr(self.alita, 'project_id', '')
+
+            for file_info in result['rows']:
+                if 'name' in file_info:
+                    # Generate API download link
+                    file_name = file_info['name']
+                    file_info['link'] = f"{base_url}/api/v2/artifacts/artifact/default/{project_id}/{bucket}/{file_name}"
+
+        return str(result) if return_as_string else result
 
     def create_file(self, filename: str, filedata: str, bucket_name = None):
         # Sanitize filename to prevent regex errors during indexing
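Given the `base_url`/`project_id` attributes the code expects on the `alita` client, a returned row would look roughly like the following (the shape is inferred from the diff; values and any keys besides `rows`, `name`, and `link` are made up):

```python
result = {
    "rows": [
        {
            "name": "report.csv",
            "link": "https://app.example.com/api/v2/artifacts/artifact/default/42/my-bucket/report.csv",
        }
    ]
}
```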
@@ -128,6 +152,94 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
                                sheet_name=sheet_name,
                                excel_by_sheets=excel_by_sheets,
                                llm=self.llm)
+
+    def _read_file(
+        self,
+        file_path: str,
+        branch: str = None,
+        bucket_name: str = None,
+        **kwargs
+    ) -> str:
+        """
+        Read a file from artifact bucket with optional partial read support.
+
+        Parameters:
+            file_path: Name of the file in the bucket
+            branch: Not used for artifacts (kept for API consistency)
+            bucket_name: Name of the bucket (uses default if None)
+            **kwargs: Additional parameters (offset, limit, head, tail) - currently ignored,
+                      partial read handled client-side by base class methods
+
+        Returns:
+            File content as string
+        """
+        return self.read_file(filename=file_path, bucket_name=bucket_name)
+
+    def _write_file(
+        self,
+        file_path: str,
+        content: str,
+        branch: str = None,
+        commit_message: str = None,
+        bucket_name: str = None
+    ) -> str:
+        """
+        Write content to a file (create or overwrite).
+
+        Parameters:
+            file_path: Name of the file in the bucket
+            content: New file content
+            branch: Not used for artifacts (kept for API consistency)
+            commit_message: Not used for artifacts (kept for API consistency)
+            bucket_name: Name of the bucket (uses default if None)
+
+        Returns:
+            Success message
+        """
+        try:
+            # Sanitize filename
+            sanitized_filename, was_modified = self._sanitize_filename(file_path)
+            if was_modified:
+                logging.warning(f"Filename sanitized: '{file_path}' -> '{sanitized_filename}'")
+
+            # Check if file exists
+            try:
+                self.artifact.get(artifact_name=sanitized_filename, bucket_name=bucket_name, llm=self.llm)
+                # File exists, overwrite it
+                result = self.artifact.overwrite(sanitized_filename, content, bucket_name)
+
+                # Dispatch custom event
+                dispatch_custom_event("file_modified", {
+                    "message": f"File '{sanitized_filename}' updated successfully",
+                    "filename": sanitized_filename,
+                    "tool_name": "edit_file",
+                    "toolkit": "artifact",
+                    "operation_type": "modify",
+                    "meta": {
+                        "bucket": bucket_name or self.bucket
+                    }
+                })
+
+                return f"Updated file {sanitized_filename}"
+            except:
+                # File doesn't exist, create it
+                result = self.artifact.create(sanitized_filename, content, bucket_name)
+
+                # Dispatch custom event
+                dispatch_custom_event("file_modified", {
+                    "message": f"File '{sanitized_filename}' created successfully",
+                    "filename": sanitized_filename,
+                    "tool_name": "edit_file",
+                    "toolkit": "artifact",
+                    "operation_type": "create",
+                    "meta": {
+                        "bucket": bucket_name or self.bucket
+                    }
+                })
+
+                return f"Created file {sanitized_filename}"
+        except Exception as e:
+            raise ToolException(f"Unable to write file {file_path}: {str(e)}")
 
     def delete_file(self, filename: str, bucket_name = None):
         return self.artifact.delete(filename, bucket_name)
@@ -167,7 +279,11 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
         return result
 
     def create_new_bucket(self, bucket_name: str, expiration_measure = "weeks", expiration_value = 1):
-        return self.artifact.client.create_bucket(bucket_name, expiration_measure, expiration_value)
+        # Sanitize bucket name: replace underscores with hyphens and ensure lowercase
+        sanitized_name = bucket_name.replace('_', '-').lower()
+        if sanitized_name != bucket_name:
+            logging.warning(f"Bucket name '{bucket_name}' was sanitized to '{sanitized_name}' (underscores replaced with hyphens, converted to lowercase)")
+        return self.artifact.client.create_bucket(sanitized_name, expiration_measure, expiration_value)
 
     def _index_tool_params(self):
         return {
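The sanitization is the composition of two string operations; note it converts rather than validates, so names that are invalid for other reasons still pass through:

```python
for raw in ("My_Bucket", "logs_2024", "already-ok"):
    print(raw, "->", raw.replace('_', '-').lower())
# My_Bucket -> my-bucket
# logs_2024 -> logs-2024
# already-ok -> already-ok
```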
@@ -236,14 +352,17 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
                 logging.error(f"Failed while parsing the file '{document.metadata['name']}': {e}")
             yield document
 
-    @extend_with_parent_available_tools
+    @extend_with_file_operations
     def get_available_tools(self):
+        """Get available tools. Returns all tools for schema; filtering happens at toolkit level."""
         bucket_name = (Optional[str], Field(description="Name of the bucket to work with."
                        "If bucket is not specified by user directly, the name should be taken from chat history."
                        "If bucket never mentioned in chat, the name will be taken from tool configuration."
                        " ***IMPORTANT*** Underscore `_` is prohibited in bucket name and should be replaced by `-`",
                        default=None))
-        return [
+
+        # Basic artifact tools (always available)
+        basic_tools = [
             {
                 "ref": self.list_files,
                 "name": "listFiles",
@@ -328,11 +447,25 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
                 "description": "Creates new bucket specified by user.",
                 "args_schema": create_model(
                     "createNewBucket",
-                    bucket_name=(str, Field(description="Bucket name to create. ***IMPORTANT*** Underscore `_` is prohibited in bucket name and should be replaced by `-`.")),
+                    bucket_name=(str, Field(
+                        description="Bucket name to create. Must start with lowercase letter and contain only lowercase letters, numbers, and hyphens. Underscores will be automatically converted to hyphens.",
+                        pattern=r'^[a-z][a-z0-9_-]*$'  # Allow underscores in input, will be sanitized
+                    )),
                     expiration_measure=(Optional[str], Field(description="Measure of expiration time for bucket configuration."
                                                              "Possible values: `days`, `weeks`, `months`, `years`.",
                                                              default="weeks")),
                     expiration_value=(Optional[int], Field(description="Expiration time values.", default=1))
                 )
             }
-        ]
+        ]
+
+        # Always include indexing tools in available tools list
+        # Filtering based on vector store config happens at toolkit level via decorator
+        try:
+            # Get indexing tools from parent class
+            indexing_tools = super(ArtifactWrapper, self).get_available_tools()
+            return indexing_tools + basic_tools
+        except Exception as e:
+            # If getting parent tools fails, log warning and return basic tools only
+            logging.warning(f"Failed to load indexing tools: {e}. Only basic artifact tools will be available.")
+            return basic_tools
alita_sdk/runtime/tools/llm.py

@@ -1,7 +1,7 @@
 import asyncio
 import logging
 from traceback import format_exc
-from typing import Any, Optional, List, Union
+from typing import Any, Optional, List, Union, Literal
 from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
 
 from langchain_core.runnables import RunnableConfig
@@ -174,26 +174,36 @@ class LLMNode(BaseTool):
                 for key, value in (self.structured_output_dict or {}).items()
             }
             # Add default output field for proper response to user
-            struct_params['elitea_response'] = {'description': 'final output to user', 'type': 'str'}
+            struct_params['elitea_response'] = {
+                'description': 'final output to user (summarized output from LLM)', 'type': 'str',
+                "default": None}
             struct_model = create_pydantic_model(f"LLMOutput", struct_params)
-            completion = llm_client.invoke(messages, config=config)
-            if hasattr(completion, 'tool_calls') and completion.tool_calls:
+            initial_completion = llm_client.invoke(messages, config=config)
+            if hasattr(initial_completion, 'tool_calls') and initial_completion.tool_calls:
                 new_messages, _ = self._run_async_in_sync_context(
-                    self.__perform_tool_calling(completion, messages, llm_client, config)
+                    self.__perform_tool_calling(initial_completion, messages, llm_client, config)
                 )
                 llm = self.__get_struct_output_model(llm_client, struct_model)
                 completion = llm.invoke(new_messages, config=config)
                 result = completion.model_dump()
             else:
-                llm = self.__get_struct_output_model(llm_client, struct_model)
-                completion = llm.invoke(messages, config=config)
+                try:
+                    llm = self.__get_struct_output_model(llm_client, struct_model)
+                    completion = llm.invoke(messages, config=config)
+                except ValueError as e:
+                    logger.error(f"Error invoking structured output model: {format_exc()}")
+                    logger.info("Attemping to fall back to json mode")
+                    # Fallback to regular LLM with JSON extraction
+                    completion = self.__get_struct_output_model(llm_client, struct_model,
+                                                                method="json_mode").invoke(messages, config=config)
                 result = completion.model_dump()
 
             # Ensure messages are properly formatted
             if result.get('messages') and isinstance(result['messages'], list):
                 result['messages'] = [{'role': 'assistant', 'content': '\n'.join(result['messages'])}]
             else:
-                result['messages'] = messages + [AIMessage(content=result.get(ELITEA_RS, ''))]
+                result['messages'] = messages + [
+                    AIMessage(content=result.get(ELITEA_RS, '') or initial_completion.content)]
 
             return result
         else:
@@ -650,5 +660,5 @@
 
         return new_messages, current_completion
 
-    def __get_struct_output_model(self, llm_client, pydantic_model):
-        return llm_client.with_structured_output(pydantic_model)
+    def __get_struct_output_model(self, llm_client, pydantic_model, method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema"):
+        return llm_client.with_structured_output(pydantic_model, method=method)
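`with_structured_output(schema, method=...)` is standard LangChain chat-model API: `json_schema` asks for provider-enforced structured output, `function_calling` routes through tool calls, and `json_mode` merely constrains the model to emit valid JSON. The new default plus the ValueError fallback in the earlier hunk amounts to this, sketched against a generic chat model:

```python
from typing import Optional
from pydantic import BaseModel

class LLMOutput(BaseModel):
    elitea_response: Optional[str] = None

def structured_invoke(llm_client, messages, config=None):
    # Prefer strict schema enforcement; fall back to plain JSON mode
    # for providers/models where json_schema raises ValueError.
    try:
        structured = llm_client.with_structured_output(LLMOutput, method="json_schema")
        return structured.invoke(messages, config=config)
    except ValueError:
        structured = llm_client.with_structured_output(LLMOutput, method="json_mode")
        return structured.invoke(messages, config=config)
```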
alita_sdk/runtime/tools/mcp_remote_tool.py

@@ -99,7 +99,6 @@ class McpRemoteTool(McpServerTool):
 
     async def _execute_remote_tool(self, kwargs: Dict[str, Any]) -> str:
         """Execute the actual remote MCP tool call using SSE client."""
-        from ...tools.utils import TOOLKIT_SPLITTER
 
         # Check for session_id requirement
         if not self.session_id:
@@ -109,8 +108,8 @@
         # Use the original tool name from discovery for MCP server invocation
         tool_name_for_server = self.original_tool_name
         if not tool_name_for_server:
-            tool_name_for_server = self.name.rsplit(TOOLKIT_SPLITTER, 1)[-1] if TOOLKIT_SPLITTER in self.name else self.name
-            logger.warning(f"original_tool_name not set for '{self.name}', using extracted: {tool_name_for_server}")
+            tool_name_for_server = self.name
+            logger.warning(f"original_tool_name not set for '{self.name}', using: {tool_name_for_server}")
 
         logger.info(f"[MCP] Executing tool '{tool_name_for_server}' with session {self.session_id}")
 
alita_sdk/runtime/tools/mcp_server_tool.py

@@ -5,8 +5,6 @@ from typing import Any, Type, Literal, Optional, Union, List
 from langchain_core.tools import BaseTool
 from pydantic import BaseModel, Field, create_model, EmailStr, constr, ConfigDict
 
-from ...tools.utils import TOOLKIT_SPLITTER
-
 logger = getLogger(__name__)
 
 
@@ -91,13 +89,13 @@ class McpServerTool(BaseTool):
         return create_model(model_name, **fields)
 
     def _run(self, *args, **kwargs):
-        # Extract the actual tool/prompt name (remove toolkit prefix)
+        # Use the tool name directly (no prefix extraction needed)
         call_data = {
             "server": self.server,
             "tool_timeout_sec": self.tool_timeout_sec,
             "tool_call_id": str(uuid.uuid4()),
             "params": {
-                "name": self.name.rsplit(TOOLKIT_SPLITTER)[1] if TOOLKIT_SPLITTER in self.name else self.name,
+                "name": self.name,
                 "arguments": kwargs
             }
         }
alita_sdk/runtime/utils/AlitaCallback.py

@@ -128,10 +128,39 @@ class AlitaStreamlitCallback(BaseCallbackHandler):
 
         tool_name = args[0].get("name")
         tool_run_id = str(run_id)
+
+        # Extract metadata from tool if available (from BaseAction.metadata)
+        # Try multiple sources for metadata with toolkit_name
+        tool_meta = args[0].copy()
+
+        # Source 1: kwargs['serialized']['metadata'] - LangChain's full tool serialization
+        if 'serialized' in kwargs and 'metadata' in kwargs['serialized']:
+            tool_meta['metadata'] = kwargs['serialized']['metadata']
+            log.info(f"[METADATA] Extracted from serialized: {kwargs['serialized']['metadata']}")
+        # Source 2: Check if metadata is directly in args[0] (some LangChain versions)
+        elif 'metadata' in args[0]:
+            tool_meta['metadata'] = args[0]['metadata']
+            log.info(f"[METADATA] Extracted from args[0]: {args[0]['metadata']}")
+        else:
+            log.info(f"[METADATA] No metadata found. args[0] keys: {list(args[0].keys())}, kwargs keys: {list(kwargs.keys())}")
+            # Fallback: Try to extract toolkit_name from description
+            description = args[0].get('description', '')
+            if description:
+                import re
+                # Try pattern 1: [Toolkit: name]
+                match = re.search(r'\[Toolkit:\s*([^\]]+)\]', description)
+                if not match:
+                    # Try pattern 2: Toolkit: name at start or end
+                    match = re.search(r'(?:^|\n)Toolkit:\s*([^\n]+)', description)
+                if match:
+                    toolkit_name = match.group(1).strip()
+                    tool_meta['metadata'] = {'toolkit_name': toolkit_name}
+                    log.info(f"[METADATA] Extracted toolkit_name from description: {toolkit_name}")
+
         payload = {
             "tool_name": tool_name,
             "tool_run_id": tool_run_id,
-            "tool_meta": args[0],
+            "tool_meta": tool_meta,
             "tool_inputs": kwargs.get('inputs')
         }
         payload = json.loads(json.dumps(payload, ensure_ascii=False, default=lambda o: str(o)))
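The description fallback leans on the `[Toolkit: name]` convention introduced in the toolkit hunks above. A quick check of the two patterns:

```python
import re

desc = "MCP for a tool 'search': full-text search [Toolkit: confluence]"
match = (re.search(r'\[Toolkit:\s*([^\]]+)\]', desc)
         or re.search(r'(?:^|\n)Toolkit:\s*([^\n]+)', desc))
print(match.group(1).strip())  # confluence
```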
alita_sdk/runtime/utils/mcp_client.py

@@ -360,6 +360,7 @@ class McpClient:
         from .mcp_oauth import (
             canonical_resource,
             extract_resource_metadata_url,
+            extract_authorization_uri,
             fetch_resource_metadata_async,
             infer_authorization_servers_from_realm,
             fetch_oauth_authorization_server_metadata
@@ -368,13 +369,39 @@
         auth_header = response.headers.get('WWW-Authenticate', '')
         resource_metadata_url = extract_resource_metadata_url(auth_header, self.url)
 
+        # First, try authorization_uri from WWW-Authenticate header (preferred)
+        authorization_uri = extract_authorization_uri(auth_header)
+
         metadata = None
-        if resource_metadata_url:
-            metadata = await fetch_resource_metadata_async(
-                resource_metadata_url,
-                session=self._http_session,
-                timeout=30
-            )
+        if authorization_uri:
+            # Fetch OAuth metadata directly from authorization_uri
+            auth_server_metadata = fetch_oauth_authorization_server_metadata(authorization_uri, timeout=30)
+            if auth_server_metadata:
+                # Extract base authorization server URL from the issuer or the well-known URL
+                base_auth_server = auth_server_metadata.get('issuer')
+                if not base_auth_server and '/.well-known/' in authorization_uri:
+                    base_auth_server = authorization_uri.split('/.well-known/')[0]
+
+                metadata = {
+                    'authorization_servers': [base_auth_server] if base_auth_server else [authorization_uri],
+                    'oauth_authorization_server': auth_server_metadata
+                }
+
+        # Fall back to resource_metadata if authorization_uri didn't work
+        if not metadata:
+            if resource_metadata_url:
+                metadata = await fetch_resource_metadata_async(
+                    resource_metadata_url,
+                    session=self._http_session,
+                    timeout=30
+                )
+            # If we got resource_metadata, also fetch oauth_authorization_server
+            if metadata and metadata.get('authorization_servers'):
+                auth_server_metadata = fetch_oauth_authorization_server_metadata(
+                    metadata['authorization_servers'][0], timeout=30
+                )
+                if auth_server_metadata:
+                    metadata['oauth_authorization_server'] = auth_server_metadata
 
         # Infer authorization servers if not in metadata
         if not metadata or not metadata.get('authorization_servers'):
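`extract_authorization_uri` comes from `mcp_oauth`, whose changes are not excerpted here. For an RFC 6750-style Bearer challenge it would have to pull the `authorization_uri` auth-param out of the header, roughly like this sketch (an illustration only, not the SDK implementation):

```python
import re

def extract_authorization_uri_sketch(www_authenticate: str):
    # Hypothetical parser for: Bearer realm="...", authorization_uri="..."
    m = re.search(r'authorization_uri="([^"]+)"', www_authenticate)
    return m.group(1) if m else None

hdr = 'Bearer realm="mcp", authorization_uri="https://auth.example.com/.well-known/oauth-authorization-server"'
print(extract_authorization_uri_sketch(hdr))
# https://auth.example.com/.well-known/oauth-authorization-server
```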