alita-sdk 0.3.497__py3-none-any.whl → 0.3.515__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (108)
  1. alita_sdk/cli/inventory.py +12 -195
  2. alita_sdk/community/inventory/__init__.py +12 -0
  3. alita_sdk/community/inventory/toolkit.py +9 -5
  4. alita_sdk/community/inventory/toolkit_utils.py +176 -0
  5. alita_sdk/configurations/ado.py +144 -0
  6. alita_sdk/configurations/confluence.py +76 -42
  7. alita_sdk/configurations/figma.py +76 -0
  8. alita_sdk/configurations/gitlab.py +2 -0
  9. alita_sdk/configurations/qtest.py +72 -1
  10. alita_sdk/configurations/report_portal.py +96 -0
  11. alita_sdk/configurations/sharepoint.py +148 -0
  12. alita_sdk/configurations/testio.py +83 -0
  13. alita_sdk/runtime/clients/artifact.py +2 -2
  14. alita_sdk/runtime/clients/client.py +24 -19
  15. alita_sdk/runtime/clients/sandbox_client.py +14 -0
  16. alita_sdk/runtime/langchain/assistant.py +48 -2
  17. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
  18. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +2 -1
  19. alita_sdk/runtime/langchain/document_loaders/constants.py +2 -1
  20. alita_sdk/runtime/langchain/langraph_agent.py +8 -9
  21. alita_sdk/runtime/langchain/utils.py +6 -1
  22. alita_sdk/runtime/toolkits/artifact.py +14 -5
  23. alita_sdk/runtime/toolkits/datasource.py +13 -6
  24. alita_sdk/runtime/toolkits/mcp.py +26 -157
  25. alita_sdk/runtime/toolkits/planning.py +10 -5
  26. alita_sdk/runtime/toolkits/tools.py +23 -7
  27. alita_sdk/runtime/toolkits/vectorstore.py +11 -5
  28. alita_sdk/runtime/tools/artifact.py +139 -6
  29. alita_sdk/runtime/tools/llm.py +20 -10
  30. alita_sdk/runtime/tools/mcp_remote_tool.py +2 -3
  31. alita_sdk/runtime/tools/mcp_server_tool.py +2 -4
  32. alita_sdk/runtime/utils/AlitaCallback.py +30 -1
  33. alita_sdk/runtime/utils/mcp_client.py +33 -6
  34. alita_sdk/runtime/utils/mcp_oauth.py +125 -8
  35. alita_sdk/runtime/utils/mcp_sse_client.py +35 -6
  36. alita_sdk/runtime/utils/utils.py +2 -0
  37. alita_sdk/tools/__init__.py +15 -0
  38. alita_sdk/tools/ado/repos/__init__.py +10 -12
  39. alita_sdk/tools/ado/test_plan/__init__.py +23 -8
  40. alita_sdk/tools/ado/wiki/__init__.py +24 -8
  41. alita_sdk/tools/ado/wiki/ado_wrapper.py +21 -7
  42. alita_sdk/tools/ado/work_item/__init__.py +24 -8
  43. alita_sdk/tools/advanced_jira_mining/__init__.py +10 -8
  44. alita_sdk/tools/aws/delta_lake/__init__.py +12 -9
  45. alita_sdk/tools/aws/delta_lake/tool.py +5 -1
  46. alita_sdk/tools/azure_ai/search/__init__.py +9 -7
  47. alita_sdk/tools/base/tool.py +5 -1
  48. alita_sdk/tools/base_indexer_toolkit.py +25 -0
  49. alita_sdk/tools/bitbucket/__init__.py +14 -10
  50. alita_sdk/tools/bitbucket/api_wrapper.py +50 -2
  51. alita_sdk/tools/browser/__init__.py +5 -4
  52. alita_sdk/tools/carrier/__init__.py +5 -6
  53. alita_sdk/tools/cloud/aws/__init__.py +9 -7
  54. alita_sdk/tools/cloud/azure/__init__.py +9 -7
  55. alita_sdk/tools/cloud/gcp/__init__.py +9 -7
  56. alita_sdk/tools/cloud/k8s/__init__.py +9 -7
  57. alita_sdk/tools/code/linter/__init__.py +9 -8
  58. alita_sdk/tools/code/sonar/__init__.py +9 -7
  59. alita_sdk/tools/confluence/__init__.py +15 -10
  60. alita_sdk/tools/custom_open_api/__init__.py +11 -5
  61. alita_sdk/tools/elastic/__init__.py +10 -8
  62. alita_sdk/tools/elitea_base.py +387 -9
  63. alita_sdk/tools/figma/__init__.py +8 -7
  64. alita_sdk/tools/github/__init__.py +12 -14
  65. alita_sdk/tools/github/github_client.py +68 -2
  66. alita_sdk/tools/github/tool.py +5 -1
  67. alita_sdk/tools/gitlab/__init__.py +14 -11
  68. alita_sdk/tools/gitlab/api_wrapper.py +81 -1
  69. alita_sdk/tools/gitlab_org/__init__.py +9 -8
  70. alita_sdk/tools/google/bigquery/__init__.py +12 -12
  71. alita_sdk/tools/google/bigquery/tool.py +5 -1
  72. alita_sdk/tools/google_places/__init__.py +9 -8
  73. alita_sdk/tools/jira/__init__.py +15 -10
  74. alita_sdk/tools/keycloak/__init__.py +10 -8
  75. alita_sdk/tools/localgit/__init__.py +8 -3
  76. alita_sdk/tools/localgit/local_git.py +62 -54
  77. alita_sdk/tools/localgit/tool.py +5 -1
  78. alita_sdk/tools/memory/__init__.py +11 -3
  79. alita_sdk/tools/ocr/__init__.py +10 -8
  80. alita_sdk/tools/openapi/__init__.py +6 -2
  81. alita_sdk/tools/pandas/__init__.py +9 -7
  82. alita_sdk/tools/postman/__init__.py +10 -11
  83. alita_sdk/tools/pptx/__init__.py +9 -9
  84. alita_sdk/tools/qtest/__init__.py +9 -8
  85. alita_sdk/tools/rally/__init__.py +9 -8
  86. alita_sdk/tools/report_portal/__init__.py +11 -9
  87. alita_sdk/tools/salesforce/__init__.py +9 -9
  88. alita_sdk/tools/servicenow/__init__.py +10 -8
  89. alita_sdk/tools/sharepoint/__init__.py +9 -8
  90. alita_sdk/tools/slack/__init__.py +8 -7
  91. alita_sdk/tools/sql/__init__.py +9 -8
  92. alita_sdk/tools/testio/__init__.py +9 -8
  93. alita_sdk/tools/testrail/__init__.py +10 -8
  94. alita_sdk/tools/utils/__init__.py +9 -4
  95. alita_sdk/tools/utils/text_operations.py +254 -0
  96. alita_sdk/tools/xray/__init__.py +10 -8
  97. alita_sdk/tools/yagmail/__init__.py +8 -3
  98. alita_sdk/tools/zephyr/__init__.py +8 -7
  99. alita_sdk/tools/zephyr_enterprise/__init__.py +10 -8
  100. alita_sdk/tools/zephyr_essential/__init__.py +9 -8
  101. alita_sdk/tools/zephyr_scale/__init__.py +9 -8
  102. alita_sdk/tools/zephyr_squad/__init__.py +9 -8
  103. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/METADATA +1 -1
  104. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/RECORD +108 -105
  105. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/WHEEL +0 -0
  106. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/entry_points.txt +0 -0
  107. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/licenses/LICENSE +0 -0
  108. {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.515.dist-info}/top_level.txt +0 -0
@@ -15,7 +15,6 @@ from pydantic import BaseModel, ConfigDict, Field, SecretStr
 from ..tools.mcp_server_tool import McpServerTool
 from ..tools.mcp_remote_tool import McpRemoteTool
 from ..tools.mcp_inspect_tool import McpInspectTool
-from ...tools.utils import TOOLKIT_SPLITTER, clean_string
 from ..models.mcp_models import McpConnectionConfig
 from ..utils.mcp_client import McpClient
 from ..utils.mcp_oauth import (
@@ -40,110 +39,6 @@ def safe_int(value, default):
         logger.warning(f"Invalid integer value '{value}', using default {default}")
         return default
 
-def optimize_tool_name(prefix: str, tool_name: str, max_total_length: int = 64) -> str:
-    """
-    Optimize tool name to fit within max_total_length while preserving meaning.
-
-    Args:
-        prefix: The toolkit prefix (already cleaned)
-        tool_name: The original tool name
-        max_total_length: Maximum total length for the full tool name (default: 64)
-
-    Returns:
-        Optimized full tool name in format: prefix___tool_name
-    """
-    splitter = TOOLKIT_SPLITTER
-    splitter_len = len(splitter)
-    prefix_len = len(prefix)
-
-    # Calculate available space for tool name
-    available_space = max_total_length - prefix_len - splitter_len
-
-    if available_space <= 0:
-        logger.error(f"Prefix '{prefix}' is too long ({prefix_len} chars), cannot create valid tool name")
-        # Fallback: truncate prefix itself
-        prefix = prefix[:max_total_length - splitter_len - 10]  # Leave 10 chars for tool name
-        available_space = max_total_length - len(prefix) - splitter_len
-
-    # If tool name fits, use it as-is
-    if len(tool_name) <= available_space:
-        return f'{prefix}{splitter}{tool_name}'
-
-    # Tool name is too long, need to optimize
-    logger.debug(f"Tool name '{tool_name}' is too long ({len(tool_name)} chars), optimizing to fit {available_space} chars")
-
-    # Split tool name into parts (handle camelCase, snake_case, and mixed)
-    # First, split by underscores and hyphens
-    parts = re.split(r'[_-]', tool_name)
-
-    # Further split camelCase within each part
-    all_parts = []
-    for part in parts:
-        # Insert underscore before uppercase letters (camelCase to snake_case)
-        snake_case_part = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', part)
-        all_parts.extend(snake_case_part.split('_'))
-
-    # Filter out empty parts
-    all_parts = [p for p in all_parts if p]
-
-    # Remove redundant prefix words (case-insensitive comparison)
-    # Only remove if prefix is meaningful (>= 3 chars) to avoid over-filtering
-    prefix_lower = prefix.lower()
-    filtered_parts = []
-    for part in all_parts:
-        part_lower = part.lower()
-        # Skip if this part contains the prefix or the prefix contains this part
-        # But only if both are meaningful (>= 3 chars)
-        should_remove = False
-        if len(prefix_lower) >= 3 and len(part_lower) >= 3:
-            if part_lower in prefix_lower or prefix_lower in part_lower:
-                should_remove = True
-                logger.debug(f"Removing redundant part '{part}' (matches prefix '{prefix}')")
-
-        if not should_remove:
-            filtered_parts.append(part)
-
-    # If we removed all parts, keep the original parts
-    if not filtered_parts:
-        filtered_parts = all_parts
-
-    # Reconstruct tool name with filtered parts
-    optimized_name = '_'.join(filtered_parts)
-
-    # If still too long, truncate intelligently
-    if len(optimized_name) > available_space:
-        # Strategy: Keep beginning and end, as they often contain the most important info
-        # For example: "projectalita_github_io_list_branches" -> "projectalita_list_branches"
-
-        # Try removing middle parts first
-        if len(filtered_parts) > 2:
-            # Keep first and last parts, remove middle
-            kept_parts = [filtered_parts[0], filtered_parts[-1]]
-            optimized_name = '_'.join(kept_parts)
-
-        # If still too long, add parts from the end until we run out of space
-        if len(optimized_name) <= available_space and len(filtered_parts) > 2:
-            for i in range(len(filtered_parts) - 2, 0, -1):
-                candidate = '_'.join([filtered_parts[0]] + filtered_parts[i:])
-                if len(candidate) <= available_space:
-                    optimized_name = candidate
-                    break
-
-        # If still too long, just truncate
-        if len(optimized_name) > available_space:
-            # Try to truncate at word boundary
-            truncated = optimized_name[:available_space]
-            last_underscore = truncated.rfind('_')
-            if last_underscore > available_space * 0.7:  # Keep if we're not losing too much
-                optimized_name = truncated[:last_underscore]
-            else:
-                optimized_name = truncated
-
-    full_name = f'{prefix}{splitter}{optimized_name}'
-    logger.info(f"Optimized tool name: '{tool_name}' ({len(tool_name)} chars) -> '{optimized_name}' ({len(optimized_name)} chars), full: '{full_name}' ({len(full_name)} chars)")
-
-    return full_name
-
 class McpToolkit(BaseToolkit):
     """
     MCP Toolkit for connecting to a single remote MCP server and exposing its tools.
@@ -153,9 +48,6 @@ class McpToolkit(BaseToolkit):
     tools: List[BaseTool] = []
     toolkit_name: Optional[str] = None
 
-    # Class variable (not Pydantic field) for tool name length limit
-    toolkit_max_length: ClassVar[int] = 0  # No limit for MCP tool names
-
     def __getstate__(self):
        """Custom serialization for pickle compatibility."""
        state = self.__dict__.copy()
@@ -595,28 +487,23 @@ class McpToolkit(BaseToolkit):
     ) -> Optional[BaseTool]:
         """Create a BaseTool from a tool/prompt dictionary (from direct HTTP discovery)."""
         try:
-            # Store toolkit_max_length in local variable to avoid contextual access issues
-            max_length_value = cls.toolkit_max_length
-
-            # Clean toolkit name for prefixing
-            clean_prefix = clean_string(toolkit_name, max_length_value)
-
-            # Optimize tool name to fit within 64 character limit
-            full_tool_name = optimize_tool_name(clean_prefix, tool_dict.get("name", "unknown"))
+            # Use original tool name directly
+            tool_name = tool_dict.get("name", "unknown")
 
             # Check if this is a prompt (converted to tool)
             is_prompt = tool_dict.get("_mcp_type") == "prompt"
             item_type = "prompt" if is_prompt else "tool"
 
-            # Build description and ensure it doesn't exceed 1000 characters
-            description = f"MCP {item_type} '{tool_dict.get('name')}' from toolkit '{toolkit_name}': {tool_dict.get('description', '')}"
+            # Build description with toolkit context and ensure it doesn't exceed 1000 characters
+            base_description = tool_dict.get('description', '')
+            description = f"{base_description}\nToolkit: {toolkit_name} ({connection_config.url})"
             if len(description) > 1000:
                 description = description[:997] + "..."
-                logger.debug(f"Trimmed description for tool '{tool_dict.get('name')}' from {len(description)} to 1000 chars")
+                logger.debug(f"Trimmed description for tool '{tool_name}' to 1000 chars")
 
             # Use McpRemoteTool for remote MCP servers (HTTP/SSE)
             return McpRemoteTool(
-                name=full_tool_name,
+                name=tool_name,
                 description=description,
                 args_schema=McpServerTool.create_pydantic_model_from_schema(
                     tool_dict.get("inputSchema", {})
@@ -628,11 +515,11 @@
                 tool_timeout_sec=timeout,
                 is_prompt=is_prompt,
                 prompt_name=tool_dict.get("_mcp_prompt_name") if is_prompt else None,
-                original_tool_name=tool_dict.get('name'),  # Store original name for MCP server invocation
+                original_tool_name=tool_name,  # Store original name for MCP server invocation
                 session_id=session_id  # Pass session ID for stateful SSE servers
             )
         except Exception as e:
-            logger.error(f"Failed to create MCP tool '{tool_dict.get('name')}' from toolkit '{toolkit_name}': {e}")
+            logger.error(f"Failed to create MCP tool '{tool_name}' from toolkit '{toolkit_name}': {e}")
             return None
 
     @classmethod
@@ -691,7 +578,7 @@
             # We don't have full connection config in static mode, so create a basic one
             # The inspection tool will work as long as the server is accessible
             inspection_tool = McpInspectTool(
-                name=f"{clean_string(toolkit_name, 50)}{TOOLKIT_SPLITTER}mcp_inspect",
+                name="mcp_inspect",
                 server_name=toolkit_name,
                 server_url="",  # Will be populated by the client if available
                 description=f"Inspect available tools, prompts, and resources from MCP toolkit '{toolkit_name}'"
@@ -713,22 +600,17 @@
     ) -> Optional[BaseTool]:
         """Create a BaseTool from discovered metadata."""
         try:
-            # Store toolkit_max_length in local variable to avoid contextual access issues
-            max_length_value = cls.toolkit_max_length
+            # Use original tool name directly
+            tool_name = tool_metadata.name
 
-            # Clean server name for prefixing (use tool_metadata.server instead of toolkit_name)
-            clean_prefix = clean_string(tool_metadata.server, max_length_value)
-            # Optimize tool name to fit within 64 character limit
-            full_tool_name = optimize_tool_name(clean_prefix, tool_metadata.name)
-
-            # Build description and ensure it doesn't exceed 1000 characters
-            description = f"MCP tool '{tool_metadata.name}' from server '{tool_metadata.server}': {tool_metadata.description}"
+            # Build description with toolkit context and ensure it doesn't exceed 1000 characters
+            description = f"{tool_metadata.description}\nToolkit: {toolkit_name}"
             if len(description) > 1000:
                 description = description[:997] + "..."
-                logger.debug(f"Trimmed description for tool '{tool_metadata.name}' from {len(description)} to 1000 chars")
+                logger.debug(f"Trimmed description for tool '{tool_name}' to 1000 chars")
 
             return McpServerTool(
-                name=full_tool_name,
+                name=tool_name,
                 description=description,
                 args_schema=McpServerTool.create_pydantic_model_from_schema(tool_metadata.input_schema),
                 client=client,
@@ -736,7 +618,7 @@
                 tool_timeout_sec=timeout
             )
         except Exception as e:
-            logger.error(f"Failed to create MCP tool '{tool_metadata.name}' from server '{tool_metadata.server}': {e}")
+            logger.error(f"Failed to create MCP tool '{tool_name}' from server '{tool_metadata.server}': {e}")
             return None
 
     @classmethod
@@ -749,23 +631,18 @@
     ) -> Optional[BaseTool]:
         """Create a single MCP tool."""
         try:
-            # Store toolkit_max_length in local variable to avoid contextual access issues
-            max_length_value = cls.toolkit_max_length
-
-            # Clean toolkit name for prefixing
-            clean_prefix = clean_string(toolkit_name, max_length_value)
+            # Use original tool name directly
+            tool_name = available_tool["name"]
 
-            # Optimize tool name to fit within 64 character limit
-            full_tool_name = optimize_tool_name(clean_prefix, available_tool["name"])
-
-            # Build description and ensure it doesn't exceed 1000 characters
-            description = f"MCP tool '{available_tool['name']}' from toolkit '{toolkit_name}': {available_tool.get('description', '')}"
+            # Build description with toolkit context and ensure it doesn't exceed 1000 characters
+            base_description = available_tool.get('description', '')
+            description = f"{base_description}\nToolkit: {toolkit_name}"
             if len(description) > 1000:
                 description = description[:997] + "..."
-                logger.debug(f"Trimmed description for tool '{available_tool['name']}' from {len(description)} to 1000 chars")
+                logger.debug(f"Trimmed description for tool '{tool_name}' to 1000 chars")
 
             return McpServerTool(
-                name=full_tool_name,
+                name=tool_name,
                 description=description,
                 args_schema=McpServerTool.create_pydantic_model_from_schema(
                     available_tool.get("inputSchema", {})
@@ -775,7 +652,7 @@
                 tool_timeout_sec=timeout
             )
         except Exception as e:
-            logger.error(f"Failed to create MCP tool '{available_tool.get('name')}' from toolkit '{toolkit_name}': {e}")
+            logger.error(f"Failed to create MCP tool '{tool_name}' from toolkit '{toolkit_name}': {e}")
             return None
 
     @classmethod
@@ -786,16 +663,8 @@
     ) -> Optional[BaseTool]:
         """Create the inspection tool for the MCP toolkit."""
         try:
-            # Store toolkit_max_length in local variable to avoid contextual access issues
-            max_length_value = cls.toolkit_max_length
-
-            # Clean toolkit name for prefixing
-            clean_prefix = clean_string(toolkit_name, max_length_value)
-
-            full_tool_name = f'{clean_prefix}{TOOLKIT_SPLITTER}mcp_inspect'
-
             return McpInspectTool(
-                name=full_tool_name,
+                name="mcp_inspect",
                 server_name=toolkit_name,
                 server_url=connection_config.url,
                 server_headers=connection_config.headers,
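
Taken together, the mcp.py hunks above drop the old prefix-based naming (the `optimize_tool_name` helper and the `prefix___tool_name` format) in favor of the server's original tool names; toolkit identity now lives in the tool description instead. A minimal sketch of the resulting description shape, using made-up values for the base description, toolkit name, and server URL:

    base_description = "List branches in a repository"   # hypothetical tool description
    toolkit_name = "github_mcp"                           # hypothetical toolkit name
    server_url = "https://mcp.example.com/sse"            # hypothetical server URL

    description = f"{base_description}\nToolkit: {toolkit_name} ({server_url})"
    if len(description) > 1000:
        description = description[:997] + "..."
    # -> "List branches in a repository\nToolkit: github_mcp (https://mcp.example.com/sse)"
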
@@ -16,7 +16,7 @@ from pydantic.fields import FieldInfo
 
 from ..tools.planning import PlanningWrapper
 from ...tools.base.tool import BaseAction
-from ...tools.utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length
+from ...tools.utils import clean_string, get_max_toolkit_length
 
 
 class PlanningToolkit(BaseToolkit):
@@ -150,8 +150,8 @@ class PlanningToolkit(BaseToolkit):
            plan_callback=plan_callback,
        )
 
-        # Build tool name prefix
-        prefix = clean_string(toolkit_name, cls._toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
+        # Use clean toolkit name for context (max 1000 chars in description)
+        toolkit_context = f" [Toolkit: {clean_string(toolkit_name, 0)}]" if toolkit_name else ''
 
        # Create tools from wrapper
        available_tools = wrapper.get_available_tools()
@@ -159,10 +159,15 @@
            if tool["name"] not in selected_tools:
                continue
 
+            # Add toolkit context to description with character limit
+            description = tool["description"]
+            if toolkit_context and len(description + toolkit_context) <= 1000:
+                description = description + toolkit_context
+
            tools.append(BaseAction(
                api_wrapper=wrapper,
-                name=prefix + tool["name"],
-                description=tool["description"],
+                name=tool["name"],
+                description=description,
                args_schema=tool["args_schema"]
            ))
 
@@ -21,7 +21,7 @@ from ..tools.image_generation import ImageGenerationToolkit
 from ...community import get_toolkits as community_toolkits, get_tools as community_tools
 from ...tools.memory import MemoryToolkit
 from ..utils.mcp_oauth import canonical_resource, McpAuthorizationRequired
-from ...tools.utils import TOOLKIT_SPLITTER
+from ...tools.utils import clean_string
 from alita_sdk.tools import _inject_toolkit_id
 
 logger = logging.getLogger(__name__)
@@ -41,7 +41,7 @@ def get_toolkits():
     return core_toolkits + community_toolkits() + alita_toolkits()
 
 
-def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseStore = None, debug_mode: Optional[bool] = False, mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None) -> list:
+def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseStore = None, debug_mode: Optional[bool] = False, mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None, ignored_mcp_servers: Optional[list] = None) -> list:
     prompts = []
     tools = []
 
@@ -110,10 +110,11 @@ def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseS
                pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
                embedding_model=tool['settings'].get('embedding_model'),
                collection_name=f"{tool.get('toolkit_name')}",
-                collection_schema=str(tool['id']),
+                collection_schema=str(tool['settings'].get('id', tool.get('id', ''))),
            ).get_tools()
            # Inject toolkit_id for artifact tools as well
-            _inject_toolkit_id(tool, toolkit_tools)
+            # Pass settings as the tool config since that's where the id field is
+            _inject_toolkit_id(tool['settings'], toolkit_tools)
            tools.extend(toolkit_tools)
 
        elif tool['type'] == 'vectorstore':
@@ -164,6 +165,14 @@
            # remote mcp tool initialization with token injection
            settings = dict(tool['settings'])
            url = settings.get('url')
+
+            # Check if this MCP server should be ignored (user chose to continue without auth)
+            if ignored_mcp_servers and url:
+                canonical_url = canonical_resource(url)
+                if canonical_url in ignored_mcp_servers or url in ignored_mcp_servers:
+                    logger.info(f"[MCP Auth] Skipping ignored MCP server: {url}")
+                    continue
+
            headers = settings.get('headers')
            token_data = None
            session_id = None
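
A sketch of how a caller might exercise the new `ignored_mcp_servers` parameter of `get_tools`; the URL and the surrounding variables (`tools_list`, `client`, `llm`) are placeholders, and matching follows the canonical/raw URL check shown above:

    # Servers the user declined to authorize are skipped instead of
    # triggering an authorization flow during tool initialization.
    tools = get_tools(
        tools_list,
        alita_client=client,
        llm=llm,
        ignored_mcp_servers=["https://mcp.example.com/sse"],  # placeholder URL
    )
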
@@ -319,11 +328,18 @@ def _mcp_tools(tools_list, alita):
 
 def _init_single_mcp_tool(server_toolkit_name, toolkit_name, available_tool, alita, toolkit_settings):
     try:
-
-        tool_name = f'{toolkit_name}{TOOLKIT_SPLITTER}{available_tool["name"]}'
+        # Use clean tool name without prefix
+        tool_name = available_tool["name"]
+        # Add toolkit context to description (max 1000 chars)
+        toolkit_context = f" [Toolkit: {clean_string(toolkit_name)}]" if toolkit_name else ''
+        base_description = f"MCP for a tool '{tool_name}': {available_tool.get('description', '')}"
+        description = base_description
+        if toolkit_context and len(base_description + toolkit_context) <= 1000:
+            description = base_description + toolkit_context
+
         return McpServerTool(
             name=tool_name,
-            description=f"MCP for a tool '{tool_name}': {available_tool.get('description', '')}",
+            description=description,
             args_schema=McpServerTool.create_pydantic_model_from_schema(
                 available_tool.get("inputSchema", {})
             ),
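
The `[Toolkit: ...]` suffix used here (the same guard appears in the planning toolkit above and the vectorstore toolkit below) is only applied when the combined string stays under the 1000-character limit. A small self-contained sketch of that guard, with a hypothetical helper name and sample strings; the real code also passes the toolkit name through `clean_string` first:

    def with_toolkit_context(description: str, toolkit_name: str, limit: int = 1000) -> str:
        # Append the toolkit context only if the result still fits within the limit.
        context = f" [Toolkit: {toolkit_name}]" if toolkit_name else ""
        if context and len(description + context) <= limit:
            return description + context
        return description

    print(with_toolkit_context("MCP for a tool 'list_branches': lists branches", "github_mcp"))
    # MCP for a tool 'list_branches': lists branches [Toolkit: github_mcp]
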
@@ -1,7 +1,7 @@
 from logging import getLogger
 from typing import Any, List, Literal, Optional
 
-from alita_sdk.tools.utils import clean_string, TOOLKIT_SPLITTER
+from alita_sdk.tools.utils import clean_string
 from pydantic import BaseModel, create_model, Field, ConfigDict
 from langchain_core.tools import BaseToolkit, BaseTool
 from alita_sdk.tools.base.tool import BaseAction
@@ -31,7 +31,8 @@ class VectorStoreToolkit(BaseToolkit):
                    toolkit_name: Optional[str] = None,
                    selected_tools: list[str] = []):
        logger.info("Selected tools: %s", selected_tools)
-        prefix = clean_string(toolkit_name) + TOOLKIT_SPLITTER if toolkit_name else ''
+        # Use clean toolkit name for context (max 1000 chars in description)
+        toolkit_context = f" [Toolkit: {clean_string(toolkit_name)}]" if toolkit_name else ''
        if selected_tools is None:
            selected_tools = []
        tools = []
@@ -46,11 +47,16 @@
            # if selected_tools:
            #     if tool["name"] not in selected_tools:
            #         continue
+            # Add toolkit context to description with character limit
+            description = tool["description"]
+            if toolkit_context and len(description + toolkit_context) <= 1000:
+                description = description + toolkit_context
            tools.append(BaseAction(
                api_wrapper=vectorstore_wrapper,
-                name=f'{prefix}{tool["name"]}',
-                description=tool["description"],
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
            ))
        return cls(tools=tools)
 
@@ -13,6 +13,7 @@ from pydantic import create_model, Field, model_validator
 
 from ...tools.non_code_indexer_toolkit import NonCodeIndexerToolkit
 from ...tools.utils.available_tools_decorator import extend_with_parent_available_tools
+from ...tools.elitea_base import extend_with_file_operations, BaseCodeToolApiWrapper
 from ...runtime.utils.utils import IndexerKeywords
 
 
@@ -20,6 +21,12 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
     bucket: str
     artifact: Optional[Any] = None
 
+    # Import file operation methods from BaseCodeToolApiWrapper
+    read_file_chunk = BaseCodeToolApiWrapper.read_file_chunk
+    read_multiple_files = BaseCodeToolApiWrapper.read_multiple_files
+    search_file = BaseCodeToolApiWrapper.search_file
+    edit_file = BaseCodeToolApiWrapper.edit_file
+
     @model_validator(mode='before')
     @classmethod
     def validate_toolkit(cls, values):
@@ -31,7 +38,24 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
        return super().validate_toolkit(values)
 
    def list_files(self, bucket_name = None, return_as_string = True):
-        return self.artifact.list(bucket_name, return_as_string)
+        """List all files in the artifact bucket with API download links."""
+        result = self.artifact.list(bucket_name, return_as_string=False)
+
+        # Add API download link to each file
+        if isinstance(result, dict) and 'rows' in result:
+            bucket = bucket_name or self.bucket
+
+            # Get base_url and project_id from alita client
+            base_url = getattr(self.alita, 'base_url', '').rstrip('/')
+            project_id = getattr(self.alita, 'project_id', '')
+
+            for file_info in result['rows']:
+                if 'name' in file_info:
+                    # Generate API download link
+                    file_name = file_info['name']
+                    file_info['link'] = f"{base_url}/api/v2/artifacts/artifact/default/{project_id}/{bucket}/{file_name}"
+
+        return str(result) if return_as_string else result
 
    def create_file(self, filename: str, filedata: str, bucket_name = None):
        # Sanitize filename to prevent regex errors during indexing
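
With this change every row returned by `list_files` gains a `link` field; an illustrative example of the URL it produces (base URL, project id, bucket, and file name are made up):

    base_url = "https://app.example.com"   # illustrative
    project_id = "42"                      # illustrative
    bucket = "reports"                     # illustrative
    file_name = "summary.txt"              # illustrative
    link = f"{base_url}/api/v2/artifacts/artifact/default/{project_id}/{bucket}/{file_name}"
    # -> https://app.example.com/api/v2/artifacts/artifact/default/42/reports/summary.txt
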
@@ -128,6 +152,94 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
            sheet_name=sheet_name,
            excel_by_sheets=excel_by_sheets,
            llm=self.llm)
+
+    def _read_file(
+        self,
+        file_path: str,
+        branch: str = None,
+        bucket_name: str = None,
+        **kwargs
+    ) -> str:
+        """
+        Read a file from artifact bucket with optional partial read support.
+
+        Parameters:
+            file_path: Name of the file in the bucket
+            branch: Not used for artifacts (kept for API consistency)
+            bucket_name: Name of the bucket (uses default if None)
+            **kwargs: Additional parameters (offset, limit, head, tail) - currently ignored,
+                      partial read handled client-side by base class methods
+
+        Returns:
+            File content as string
+        """
+        return self.read_file(filename=file_path, bucket_name=bucket_name)
+
+    def _write_file(
+        self,
+        file_path: str,
+        content: str,
+        branch: str = None,
+        commit_message: str = None,
+        bucket_name: str = None
+    ) -> str:
+        """
+        Write content to a file (create or overwrite).
+
+        Parameters:
+            file_path: Name of the file in the bucket
+            content: New file content
+            branch: Not used for artifacts (kept for API consistency)
+            commit_message: Not used for artifacts (kept for API consistency)
+            bucket_name: Name of the bucket (uses default if None)
+
+        Returns:
+            Success message
+        """
+        try:
+            # Sanitize filename
+            sanitized_filename, was_modified = self._sanitize_filename(file_path)
+            if was_modified:
+                logging.warning(f"Filename sanitized: '{file_path}' -> '{sanitized_filename}'")
+
+            # Check if file exists
+            try:
+                self.artifact.get(artifact_name=sanitized_filename, bucket_name=bucket_name, llm=self.llm)
+                # File exists, overwrite it
+                result = self.artifact.overwrite(sanitized_filename, content, bucket_name)
+
+                # Dispatch custom event
+                dispatch_custom_event("file_modified", {
+                    "message": f"File '{sanitized_filename}' updated successfully",
+                    "filename": sanitized_filename,
+                    "tool_name": "edit_file",
+                    "toolkit": "artifact",
+                    "operation_type": "modify",
+                    "meta": {
+                        "bucket": bucket_name or self.bucket
+                    }
+                })
+
+                return f"Updated file {sanitized_filename}"
+            except:
+                # File doesn't exist, create it
+                result = self.artifact.create(sanitized_filename, content, bucket_name)
+
+                # Dispatch custom event
+                dispatch_custom_event("file_modified", {
+                    "message": f"File '{sanitized_filename}' created successfully",
+                    "filename": sanitized_filename,
+                    "tool_name": "edit_file",
+                    "toolkit": "artifact",
+                    "operation_type": "create",
+                    "meta": {
+                        "bucket": bucket_name or self.bucket
+                    }
+                })
+
+                return f"Created file {sanitized_filename}"
+        except Exception as e:
+            raise ToolException(f"Unable to write file {file_path}: {str(e)}")
 
    def delete_file(self, filename: str, bucket_name = None):
        return self.artifact.delete(filename, bucket_name)
@@ -167,7 +279,11 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
        return result
 
    def create_new_bucket(self, bucket_name: str, expiration_measure = "weeks", expiration_value = 1):
-        return self.artifact.client.create_bucket(bucket_name, expiration_measure, expiration_value)
+        # Sanitize bucket name: replace underscores with hyphens and ensure lowercase
+        sanitized_name = bucket_name.replace('_', '-').lower()
+        if sanitized_name != bucket_name:
+            logging.warning(f"Bucket name '{bucket_name}' was sanitized to '{sanitized_name}' (underscores replaced with hyphens, converted to lowercase)")
+        return self.artifact.client.create_bucket(sanitized_name, expiration_measure, expiration_value)
 
    def _index_tool_params(self):
        return {
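
The bucket-name sanitization added above is a single transformation; for example (input value is made up):

    bucket_name = "My_Project_Bucket"                       # made-up input
    sanitized_name = bucket_name.replace('_', '-').lower()  # -> "my-project-bucket"
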
@@ -236,14 +352,17 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
                logging.error(f"Failed while parsing the file '{document.metadata['name']}': {e}")
            yield document
 
-    @extend_with_parent_available_tools
+    @extend_with_file_operations
    def get_available_tools(self):
+        """Get available tools. Returns all tools for schema; filtering happens at toolkit level."""
        bucket_name = (Optional[str], Field(description="Name of the bucket to work with."
                                                        "If bucket is not specified by user directly, the name should be taken from chat history."
                                                        "If bucket never mentioned in chat, the name will be taken from tool configuration."
                                                        " ***IMPORTANT*** Underscore `_` is prohibited in bucket name and should be replaced by `-`",
                                            default=None))
-        return [
+
+        # Basic artifact tools (always available)
+        basic_tools = [
            {
                "ref": self.list_files,
                "name": "listFiles",
@@ -328,11 +447,25 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
                "description": "Creates new bucket specified by user.",
                "args_schema": create_model(
                    "createNewBucket",
-                    bucket_name=(str, Field(description="Bucket name to create. ***IMPORTANT*** Underscore `_` is prohibited in bucket name and should be replaced by `-`.")),
+                    bucket_name=(str, Field(
+                        description="Bucket name to create. Must start with lowercase letter and contain only lowercase letters, numbers, and hyphens. Underscores will be automatically converted to hyphens.",
+                        pattern=r'^[a-z][a-z0-9_-]*$'  # Allow underscores in input, will be sanitized
+                    )),
                    expiration_measure=(Optional[str], Field(description="Measure of expiration time for bucket configuration."
                                                                         "Possible values: `days`, `weeks`, `months`, `years`.",
                                                             default="weeks")),
                    expiration_value=(Optional[int], Field(description="Expiration time values.", default=1))
                )
            }
-        ]
+        ]
+
+        # Always include indexing tools in available tools list
+        # Filtering based on vector store config happens at toolkit level via decorator
+        try:
+            # Get indexing tools from parent class
+            indexing_tools = super(ArtifactWrapper, self).get_available_tools()
+            return indexing_tools + basic_tools
+        except Exception as e:
+            # If getting parent tools fails, log warning and return basic tools only
+            logging.warning(f"Failed to load indexing tools: {e}. Only basic artifact tools will be available.")
+            return basic_tools