alita-sdk 0.3.462__py3-none-any.whl → 0.3.627__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (261)
  1. alita_sdk/cli/agent/__init__.py +5 -0
  2. alita_sdk/cli/agent/default.py +258 -0
  3. alita_sdk/cli/agent_executor.py +15 -3
  4. alita_sdk/cli/agent_loader.py +56 -8
  5. alita_sdk/cli/agent_ui.py +93 -31
  6. alita_sdk/cli/agents.py +2274 -230
  7. alita_sdk/cli/callbacks.py +96 -25
  8. alita_sdk/cli/cli.py +10 -1
  9. alita_sdk/cli/config.py +162 -9
  10. alita_sdk/cli/context/__init__.py +30 -0
  11. alita_sdk/cli/context/cleanup.py +198 -0
  12. alita_sdk/cli/context/manager.py +731 -0
  13. alita_sdk/cli/context/message.py +285 -0
  14. alita_sdk/cli/context/strategies.py +289 -0
  15. alita_sdk/cli/context/token_estimation.py +127 -0
  16. alita_sdk/cli/input_handler.py +419 -0
  17. alita_sdk/cli/inventory.py +1073 -0
  18. alita_sdk/cli/testcases/__init__.py +94 -0
  19. alita_sdk/cli/testcases/data_generation.py +119 -0
  20. alita_sdk/cli/testcases/discovery.py +96 -0
  21. alita_sdk/cli/testcases/executor.py +84 -0
  22. alita_sdk/cli/testcases/logger.py +85 -0
  23. alita_sdk/cli/testcases/parser.py +172 -0
  24. alita_sdk/cli/testcases/prompts.py +91 -0
  25. alita_sdk/cli/testcases/reporting.py +125 -0
  26. alita_sdk/cli/testcases/setup.py +108 -0
  27. alita_sdk/cli/testcases/test_runner.py +282 -0
  28. alita_sdk/cli/testcases/utils.py +39 -0
  29. alita_sdk/cli/testcases/validation.py +90 -0
  30. alita_sdk/cli/testcases/workflow.py +196 -0
  31. alita_sdk/cli/toolkit.py +14 -17
  32. alita_sdk/cli/toolkit_loader.py +35 -5
  33. alita_sdk/cli/tools/__init__.py +36 -2
  34. alita_sdk/cli/tools/approval.py +224 -0
  35. alita_sdk/cli/tools/filesystem.py +910 -64
  36. alita_sdk/cli/tools/planning.py +389 -0
  37. alita_sdk/cli/tools/terminal.py +414 -0
  38. alita_sdk/community/__init__.py +72 -12
  39. alita_sdk/community/inventory/__init__.py +236 -0
  40. alita_sdk/community/inventory/config.py +257 -0
  41. alita_sdk/community/inventory/enrichment.py +2137 -0
  42. alita_sdk/community/inventory/extractors.py +1469 -0
  43. alita_sdk/community/inventory/ingestion.py +3172 -0
  44. alita_sdk/community/inventory/knowledge_graph.py +1457 -0
  45. alita_sdk/community/inventory/parsers/__init__.py +218 -0
  46. alita_sdk/community/inventory/parsers/base.py +295 -0
  47. alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
  48. alita_sdk/community/inventory/parsers/go_parser.py +851 -0
  49. alita_sdk/community/inventory/parsers/html_parser.py +389 -0
  50. alita_sdk/community/inventory/parsers/java_parser.py +593 -0
  51. alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
  52. alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
  53. alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
  54. alita_sdk/community/inventory/parsers/python_parser.py +604 -0
  55. alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
  56. alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
  57. alita_sdk/community/inventory/parsers/text_parser.py +322 -0
  58. alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
  59. alita_sdk/community/inventory/patterns/__init__.py +61 -0
  60. alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
  61. alita_sdk/community/inventory/patterns/loader.py +348 -0
  62. alita_sdk/community/inventory/patterns/registry.py +198 -0
  63. alita_sdk/community/inventory/presets.py +535 -0
  64. alita_sdk/community/inventory/retrieval.py +1403 -0
  65. alita_sdk/community/inventory/toolkit.py +173 -0
  66. alita_sdk/community/inventory/toolkit_utils.py +176 -0
  67. alita_sdk/community/inventory/visualize.py +1370 -0
  68. alita_sdk/configurations/__init__.py +1 -1
  69. alita_sdk/configurations/ado.py +141 -20
  70. alita_sdk/configurations/bitbucket.py +0 -3
  71. alita_sdk/configurations/confluence.py +76 -42
  72. alita_sdk/configurations/figma.py +76 -0
  73. alita_sdk/configurations/gitlab.py +17 -5
  74. alita_sdk/configurations/openapi.py +329 -0
  75. alita_sdk/configurations/qtest.py +72 -1
  76. alita_sdk/configurations/report_portal.py +96 -0
  77. alita_sdk/configurations/sharepoint.py +148 -0
  78. alita_sdk/configurations/testio.py +83 -0
  79. alita_sdk/runtime/clients/artifact.py +3 -3
  80. alita_sdk/runtime/clients/client.py +353 -48
  81. alita_sdk/runtime/clients/sandbox_client.py +0 -21
  82. alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
  83. alita_sdk/runtime/langchain/assistant.py +123 -26
  84. alita_sdk/runtime/langchain/constants.py +642 -1
  85. alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
  86. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
  87. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +6 -3
  88. alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +226 -7
  89. alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
  90. alita_sdk/runtime/langchain/document_loaders/constants.py +12 -7
  91. alita_sdk/runtime/langchain/langraph_agent.py +279 -73
  92. alita_sdk/runtime/langchain/utils.py +82 -15
  93. alita_sdk/runtime/llms/preloaded.py +2 -6
  94. alita_sdk/runtime/skills/__init__.py +91 -0
  95. alita_sdk/runtime/skills/callbacks.py +498 -0
  96. alita_sdk/runtime/skills/discovery.py +540 -0
  97. alita_sdk/runtime/skills/executor.py +610 -0
  98. alita_sdk/runtime/skills/input_builder.py +371 -0
  99. alita_sdk/runtime/skills/models.py +330 -0
  100. alita_sdk/runtime/skills/registry.py +355 -0
  101. alita_sdk/runtime/skills/skill_runner.py +330 -0
  102. alita_sdk/runtime/toolkits/__init__.py +7 -0
  103. alita_sdk/runtime/toolkits/application.py +21 -9
  104. alita_sdk/runtime/toolkits/artifact.py +15 -5
  105. alita_sdk/runtime/toolkits/datasource.py +13 -6
  106. alita_sdk/runtime/toolkits/mcp.py +139 -251
  107. alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
  108. alita_sdk/runtime/toolkits/planning.py +178 -0
  109. alita_sdk/runtime/toolkits/skill_router.py +238 -0
  110. alita_sdk/runtime/toolkits/subgraph.py +251 -6
  111. alita_sdk/runtime/toolkits/tools.py +238 -32
  112. alita_sdk/runtime/toolkits/vectorstore.py +11 -5
  113. alita_sdk/runtime/tools/__init__.py +3 -1
  114. alita_sdk/runtime/tools/application.py +20 -6
  115. alita_sdk/runtime/tools/artifact.py +511 -28
  116. alita_sdk/runtime/tools/data_analysis.py +183 -0
  117. alita_sdk/runtime/tools/function.py +43 -15
  118. alita_sdk/runtime/tools/image_generation.py +50 -44
  119. alita_sdk/runtime/tools/llm.py +852 -67
  120. alita_sdk/runtime/tools/loop.py +3 -1
  121. alita_sdk/runtime/tools/loop_output.py +3 -1
  122. alita_sdk/runtime/tools/mcp_remote_tool.py +25 -10
  123. alita_sdk/runtime/tools/mcp_server_tool.py +7 -6
  124. alita_sdk/runtime/tools/planning/__init__.py +36 -0
  125. alita_sdk/runtime/tools/planning/models.py +246 -0
  126. alita_sdk/runtime/tools/planning/wrapper.py +607 -0
  127. alita_sdk/runtime/tools/router.py +2 -4
  128. alita_sdk/runtime/tools/sandbox.py +9 -6
  129. alita_sdk/runtime/tools/skill_router.py +776 -0
  130. alita_sdk/runtime/tools/tool.py +3 -1
  131. alita_sdk/runtime/tools/vectorstore.py +7 -2
  132. alita_sdk/runtime/tools/vectorstore_base.py +51 -11
  133. alita_sdk/runtime/utils/AlitaCallback.py +137 -21
  134. alita_sdk/runtime/utils/constants.py +5 -1
  135. alita_sdk/runtime/utils/mcp_client.py +492 -0
  136. alita_sdk/runtime/utils/mcp_oauth.py +202 -5
  137. alita_sdk/runtime/utils/mcp_sse_client.py +36 -7
  138. alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
  139. alita_sdk/runtime/utils/serialization.py +155 -0
  140. alita_sdk/runtime/utils/streamlit.py +6 -10
  141. alita_sdk/runtime/utils/toolkit_utils.py +16 -5
  142. alita_sdk/runtime/utils/utils.py +36 -0
  143. alita_sdk/tools/__init__.py +113 -29
  144. alita_sdk/tools/ado/repos/__init__.py +51 -33
  145. alita_sdk/tools/ado/repos/repos_wrapper.py +148 -89
  146. alita_sdk/tools/ado/test_plan/__init__.py +25 -9
  147. alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
  148. alita_sdk/tools/ado/utils.py +1 -18
  149. alita_sdk/tools/ado/wiki/__init__.py +25 -8
  150. alita_sdk/tools/ado/wiki/ado_wrapper.py +291 -22
  151. alita_sdk/tools/ado/work_item/__init__.py +26 -9
  152. alita_sdk/tools/ado/work_item/ado_wrapper.py +56 -3
  153. alita_sdk/tools/advanced_jira_mining/__init__.py +11 -8
  154. alita_sdk/tools/aws/delta_lake/__init__.py +13 -9
  155. alita_sdk/tools/aws/delta_lake/tool.py +5 -1
  156. alita_sdk/tools/azure_ai/search/__init__.py +11 -8
  157. alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
  158. alita_sdk/tools/base/tool.py +5 -1
  159. alita_sdk/tools/base_indexer_toolkit.py +170 -45
  160. alita_sdk/tools/bitbucket/__init__.py +17 -12
  161. alita_sdk/tools/bitbucket/api_wrapper.py +59 -11
  162. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
  163. alita_sdk/tools/browser/__init__.py +5 -4
  164. alita_sdk/tools/carrier/__init__.py +5 -6
  165. alita_sdk/tools/carrier/backend_reports_tool.py +6 -6
  166. alita_sdk/tools/carrier/run_ui_test_tool.py +6 -6
  167. alita_sdk/tools/carrier/ui_reports_tool.py +5 -5
  168. alita_sdk/tools/chunkers/__init__.py +3 -1
  169. alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
  170. alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
  171. alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
  172. alita_sdk/tools/chunkers/universal_chunker.py +270 -0
  173. alita_sdk/tools/cloud/aws/__init__.py +10 -7
  174. alita_sdk/tools/cloud/azure/__init__.py +10 -7
  175. alita_sdk/tools/cloud/gcp/__init__.py +10 -7
  176. alita_sdk/tools/cloud/k8s/__init__.py +10 -7
  177. alita_sdk/tools/code/linter/__init__.py +10 -8
  178. alita_sdk/tools/code/loaders/codesearcher.py +3 -2
  179. alita_sdk/tools/code/sonar/__init__.py +10 -7
  180. alita_sdk/tools/code_indexer_toolkit.py +73 -23
  181. alita_sdk/tools/confluence/__init__.py +21 -15
  182. alita_sdk/tools/confluence/api_wrapper.py +78 -23
  183. alita_sdk/tools/confluence/loader.py +4 -2
  184. alita_sdk/tools/custom_open_api/__init__.py +12 -5
  185. alita_sdk/tools/elastic/__init__.py +11 -8
  186. alita_sdk/tools/elitea_base.py +493 -30
  187. alita_sdk/tools/figma/__init__.py +58 -11
  188. alita_sdk/tools/figma/api_wrapper.py +1235 -143
  189. alita_sdk/tools/figma/figma_client.py +73 -0
  190. alita_sdk/tools/figma/toon_tools.py +2748 -0
  191. alita_sdk/tools/github/__init__.py +13 -14
  192. alita_sdk/tools/github/github_client.py +224 -100
  193. alita_sdk/tools/github/graphql_client_wrapper.py +119 -33
  194. alita_sdk/tools/github/schemas.py +14 -5
  195. alita_sdk/tools/github/tool.py +5 -1
  196. alita_sdk/tools/github/tool_prompts.py +9 -22
  197. alita_sdk/tools/gitlab/__init__.py +15 -11
  198. alita_sdk/tools/gitlab/api_wrapper.py +207 -41
  199. alita_sdk/tools/gitlab_org/__init__.py +10 -8
  200. alita_sdk/tools/gitlab_org/api_wrapper.py +63 -64
  201. alita_sdk/tools/google/bigquery/__init__.py +13 -12
  202. alita_sdk/tools/google/bigquery/tool.py +5 -1
  203. alita_sdk/tools/google_places/__init__.py +10 -8
  204. alita_sdk/tools/google_places/api_wrapper.py +1 -1
  205. alita_sdk/tools/jira/__init__.py +17 -11
  206. alita_sdk/tools/jira/api_wrapper.py +91 -40
  207. alita_sdk/tools/keycloak/__init__.py +11 -8
  208. alita_sdk/tools/localgit/__init__.py +9 -3
  209. alita_sdk/tools/localgit/local_git.py +62 -54
  210. alita_sdk/tools/localgit/tool.py +5 -1
  211. alita_sdk/tools/memory/__init__.py +11 -3
  212. alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
  213. alita_sdk/tools/ocr/__init__.py +11 -8
  214. alita_sdk/tools/openapi/__init__.py +490 -114
  215. alita_sdk/tools/openapi/api_wrapper.py +1368 -0
  216. alita_sdk/tools/openapi/tool.py +20 -0
  217. alita_sdk/tools/pandas/__init__.py +20 -12
  218. alita_sdk/tools/pandas/api_wrapper.py +38 -25
  219. alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
  220. alita_sdk/tools/postman/__init__.py +11 -11
  221. alita_sdk/tools/pptx/__init__.py +10 -9
  222. alita_sdk/tools/pptx/pptx_wrapper.py +1 -1
  223. alita_sdk/tools/qtest/__init__.py +30 -10
  224. alita_sdk/tools/qtest/api_wrapper.py +430 -13
  225. alita_sdk/tools/rally/__init__.py +10 -8
  226. alita_sdk/tools/rally/api_wrapper.py +1 -1
  227. alita_sdk/tools/report_portal/__init__.py +12 -9
  228. alita_sdk/tools/salesforce/__init__.py +10 -9
  229. alita_sdk/tools/servicenow/__init__.py +17 -14
  230. alita_sdk/tools/servicenow/api_wrapper.py +1 -1
  231. alita_sdk/tools/sharepoint/__init__.py +10 -8
  232. alita_sdk/tools/sharepoint/api_wrapper.py +4 -4
  233. alita_sdk/tools/slack/__init__.py +10 -8
  234. alita_sdk/tools/slack/api_wrapper.py +2 -2
  235. alita_sdk/tools/sql/__init__.py +11 -9
  236. alita_sdk/tools/testio/__init__.py +10 -8
  237. alita_sdk/tools/testrail/__init__.py +11 -8
  238. alita_sdk/tools/testrail/api_wrapper.py +1 -1
  239. alita_sdk/tools/utils/__init__.py +9 -4
  240. alita_sdk/tools/utils/content_parser.py +77 -3
  241. alita_sdk/tools/utils/text_operations.py +410 -0
  242. alita_sdk/tools/utils/tool_prompts.py +79 -0
  243. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +17 -13
  244. alita_sdk/tools/xray/__init__.py +12 -9
  245. alita_sdk/tools/yagmail/__init__.py +9 -3
  246. alita_sdk/tools/zephyr/__init__.py +9 -7
  247. alita_sdk/tools/zephyr_enterprise/__init__.py +11 -8
  248. alita_sdk/tools/zephyr_essential/__init__.py +10 -8
  249. alita_sdk/tools/zephyr_essential/api_wrapper.py +30 -13
  250. alita_sdk/tools/zephyr_essential/client.py +2 -2
  251. alita_sdk/tools/zephyr_scale/__init__.py +11 -9
  252. alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
  253. alita_sdk/tools/zephyr_squad/__init__.py +10 -8
  254. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/METADATA +147 -7
  255. alita_sdk-0.3.627.dist-info/RECORD +468 -0
  256. alita_sdk-0.3.627.dist-info/entry_points.txt +2 -0
  257. alita_sdk-0.3.462.dist-info/RECORD +0 -384
  258. alita_sdk-0.3.462.dist-info/entry_points.txt +0 -2
  259. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/WHEEL +0 -0
  260. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/licenses/LICENSE +0 -0
  261. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,6 @@
-from typing import List, Any
+from typing import List, Any, Optional
+import logging
+import yaml
 
 from langchain_core.tools import BaseTool
 from langgraph.checkpoint.memory import MemorySaver
@@ -8,7 +10,238 @@ from ..langchain.langraph_agent import create_graph, SUBGRAPH_REGISTRY
 from ..tools.graph import GraphTool
 from ..utils.utils import clean_string
 
+logger = logging.getLogger(__name__)
 
+
+def _resolve_bypass_chain(target: str, bypass_mapping: dict, all_printer_related: set, max_depth: int = 100) -> str:
+    """
+    Recursively follow bypass mapping chain to find the first non-printer target.
+
+    Args:
+        target: Starting target node ID
+        bypass_mapping: Mapping of printer_id -> successor_id
+        all_printer_related: Set of all printer and reset node IDs
+        max_depth: Maximum chain depth to prevent infinite loops
+
+    Returns:
+        Final non-printer target node ID
+    """
+    visited = set()
+    depth = 0
+
+    while target in all_printer_related or target in bypass_mapping:
+        if depth >= max_depth:
+            logger.error(f"Maximum bypass chain depth ({max_depth}) exceeded for target '{target}'")
+            break
+
+        if target in visited:
+            logger.error(f"Circular reference detected in bypass chain starting from '{target}'")
+            break
+
+        visited.add(target)
+
+        if target in bypass_mapping:
+            target = bypass_mapping[target]
+            depth += 1
+            logger.debug(f"Following bypass chain: depth={depth}, target={target}")
+        else:
+            # Target is in all_printer_related but not in bypass mapping
+            logger.warning(f"Target '{target}' is a printer node without bypass mapping")
+            break
+
+    return target
+
+
+def _filter_printer_nodes_from_yaml(yaml_schema: str) -> str:
+    """
+    Filter out PrinterNodes and their reset nodes from a YAML schema.
+
+    This function removes:
+    1. Nodes with type='printer'
+    2. Reset nodes (pattern: {node_id}_reset)
+    3. Rewires transitions to bypass removed printer nodes
+    4. Removes printer nodes from interrupt configurations
+
+    Args:
+        yaml_schema: Original YAML schema string
+
+    Returns:
+        Filtered YAML schema string without PrinterNodes
+    """
+    try:
+        schema_dict = yaml.safe_load(yaml_schema)
+
+        if not schema_dict or 'nodes' not in schema_dict:
+            return yaml_schema
+
+        nodes = schema_dict.get('nodes', [])
+
+        # Step 1: Identify printer nodes and build bypass mapping
+        printer_nodes = set()
+        printer_reset_nodes = set()
+        printer_bypass_mapping = {}  # printer_node_id -> actual_successor_id
+
+        # First pass: Identify all printer nodes and reset nodes
+        for node in nodes:
+            node_id = node.get('id')
+            node_type = node.get('type')
+
+            if not node_id:
+                continue
+
+            # Identify reset nodes by naming pattern
+            if node_id.endswith('_reset'):
+                printer_reset_nodes.add(node_id)
+                continue
+
+            # Identify main printer nodes
+            if node_type == 'printer':
+                printer_nodes.add(node_id)
+
+        # Second pass: Build bypass mapping for printer nodes
+        for node in nodes:
+            node_id = node.get('id')
+            node_type = node.get('type')
+
+            if not node_id or node_type != 'printer':
+                continue
+
+            # Try standard pattern first: Printer -> Printer_reset -> NextNode
+            reset_node_id = f"{node_id}_reset"
+            reset_node = next((n for n in nodes if n.get('id') == reset_node_id), None)
+
+            if reset_node:
+                # Standard pattern with reset node
+                actual_successor = reset_node.get('transition')
+                if actual_successor:
+                    printer_bypass_mapping[node_id] = actual_successor
+                    logger.debug(f"Printer bypass mapping (standard): {node_id} -> {actual_successor}")
+            else:
+                # Direct transition pattern: Printer -> NextNode (no reset node)
+                # Get the direct transition from the printer node
+                direct_transition = node.get('transition')
+                if direct_transition:
+                    printer_bypass_mapping[node_id] = direct_transition
+                    logger.debug(f"Printer bypass mapping (direct): {node_id} -> {direct_transition}")
+
+        # Create the set of all printer-related nodes early so it can be used in rewiring
+        all_printer_related = printer_nodes | printer_reset_nodes
+
+        # Step 2: Filter out printer nodes and reset nodes
+        filtered_nodes = []
+        for node in nodes:
+            node_id = node.get('id')
+            node_type = node.get('type')
+
+            # Skip printer nodes
+            if node_type == 'printer':
+                logger.debug(f"Filtering out printer node: {node_id}")
+                continue
+
+            # Skip reset nodes
+            if node_id in printer_reset_nodes:
+                logger.debug(f"Filtering out printer reset node: {node_id}")
+                continue
+
+            # Step 3: Rewire transitions in remaining nodes to bypass printer nodes
+            # Use recursive resolution to handle chains of printers
+            if 'transition' in node:
+                transition = node['transition']
+                if transition in printer_bypass_mapping or transition in all_printer_related:
+                    new_transition = _resolve_bypass_chain(transition, printer_bypass_mapping, all_printer_related)
+                    if new_transition != transition:
+                        node['transition'] = new_transition
+                        logger.debug(f"Rewired transition in node '{node_id}': {transition} -> {new_transition}")
+
+            # Handle conditional outputs
+            if 'condition' in node:
+                condition = node['condition']
+                if 'conditional_outputs' in condition:
+                    new_outputs = []
+                    for output in condition['conditional_outputs']:
+                        if output in printer_bypass_mapping or output in all_printer_related:
+                            resolved_output = _resolve_bypass_chain(output, printer_bypass_mapping, all_printer_related)
+                            new_outputs.append(resolved_output)
+                        else:
+                            new_outputs.append(output)
+                    condition['conditional_outputs'] = new_outputs
+
+                if 'default_output' in condition:
+                    default = condition['default_output']
+                    if default in printer_bypass_mapping or default in all_printer_related:
+                        condition['default_output'] = _resolve_bypass_chain(default, printer_bypass_mapping, all_printer_related)
+
+            # Handle decision nodes
+            if 'decision' in node:
+                decision = node['decision']
+                if 'nodes' in decision:
+                    new_nodes = []
+                    for decision_node in decision['nodes']:
+                        if decision_node in printer_bypass_mapping or decision_node in all_printer_related:
+                            resolved_node = _resolve_bypass_chain(decision_node, printer_bypass_mapping, all_printer_related)
+                            new_nodes.append(resolved_node)
+                        else:
+                            new_nodes.append(decision_node)
+                    decision['nodes'] = new_nodes
+
+            # Handle routes (for router nodes)
+            if 'routes' in node:
+                routes = node['routes']
+                if isinstance(routes, list):
+                    new_routes = []
+                    for route in routes:
+                        if isinstance(route, dict) and 'target' in route:
+                            target = route['target']
+                            if target in printer_bypass_mapping or target in all_printer_related:
+                                route['target'] = _resolve_bypass_chain(target, printer_bypass_mapping, all_printer_related)
+                        new_routes.append(route)
+                    node['routes'] = new_routes
+
+            filtered_nodes.append(node)
+
+        # Update the nodes in schema
+        schema_dict['nodes'] = filtered_nodes
+
+        # Step 4: Filter printer nodes from interrupt configurations
+        if 'interrupt_before' in schema_dict:
+            schema_dict['interrupt_before'] = [
+                i for i in schema_dict['interrupt_before']
+                if i not in all_printer_related
+            ]
+
+        if 'interrupt_after' in schema_dict:
+            schema_dict['interrupt_after'] = [
+                i for i in schema_dict['interrupt_after']
+                if i not in all_printer_related
+            ]
+
+        # Update entry point if it points to a printer node
+        # Use helper function to recursively resolve the chain
+        if 'entry_point' in schema_dict:
+            entry_point = schema_dict['entry_point']
+            original_entry = entry_point
+
+            # Check if entry point is a printer node (directly or in bypass mapping)
+            if entry_point in all_printer_related or entry_point in printer_bypass_mapping:
+                # Use helper function to resolve the chain
+                resolved_entry = _resolve_bypass_chain(entry_point, printer_bypass_mapping, all_printer_related)
+
+                if resolved_entry != original_entry:
+                    schema_dict['entry_point'] = resolved_entry
+                    logger.info(f"Updated entry point: {original_entry} -> {resolved_entry}")
+
+
+        # Convert back to YAML
+        filtered_yaml = yaml.dump(schema_dict, default_flow_style=False, sort_keys=False)
+        return filtered_yaml
+
+    except Exception as e:
+        logger.error(f"Error filtering PrinterNodes from YAML: {e}", exc_info=True)
+        # Return original YAML if filtering fails
+        return yaml_schema
+
+
+# TODO: deprecate next release (1/15/2026)
 class SubgraphToolkit:
 
     @staticmethod
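
For orientation, here is a minimal sketch of the transformation the new _filter_printer_nodes_from_yaml() helper performs. The field names (id, type, transition, entry_point) come from the code above; the pipeline and its node names are hypothetical.

# Hypothetical pipeline: a printer node and its reset node sit in front of the real work node.
import yaml

source = """
entry_point: show_banner
nodes:
  - id: show_banner            # type='printer' -> removed by the filter
    type: printer
    transition: show_banner_reset
  - id: show_banner_reset      # '{id}_reset' naming pattern -> removed by the filter
    transition: do_work
  - id: do_work
    type: llm
    transition: END
"""

schema = yaml.safe_load(source)
# Expected result of _filter_printer_nodes_from_yaml(source), per the logic above:
# the bypass chain show_banner -> show_banner_reset -> do_work is resolved, so the
# entry point becomes 'do_work' and only the non-printer node survives.
expected_nodes = [{'id': 'do_work', 'type': 'llm', 'transition': 'END'}]
expected_entry_point = 'do_work'
print(schema['entry_point'], '->', expected_entry_point)
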
@@ -18,21 +251,33 @@ class SubgraphToolkit:
         application_version_id: int,
         llm,
         app_api_key: str,
-        selected_tools: list[str] = []
+        selected_tools: list[str] = [],
+        is_subgraph: bool = True,
+        mcp_tokens: Optional[dict] = None,
+        ignored_mcp_servers: Optional[list] = None
     ) -> List[BaseTool]:
         from .tools import get_tools
         # from langgraph.checkpoint.memory import MemorySaver
 
         app_details = client.get_app_details(application_id)
         version_details = client.get_app_version_details(application_id, application_version_id)
-        tools = get_tools(version_details['tools'], alita_client=client, llm=llm)
+        tools = get_tools(version_details['tools'], alita_client=client, llm=llm,
+                          mcp_tokens=mcp_tokens, ignored_mcp_servers=ignored_mcp_servers)
 
         # Get the subgraph name
         subgraph_name = app_details.get("name")
 
+        # Get the original YAML
+        yaml_schema = version_details['instructions']
+
+        # Filter PrinterNodes from YAML if this is a subgraph
+        if is_subgraph:
+            yaml_schema = _filter_printer_nodes_from_yaml(yaml_schema)
+            logger.info(f"Filtered PrinterNodes from subgraph pipeline '{subgraph_name}'")
+
         # Populate the registry for flattening approach
         SUBGRAPH_REGISTRY[subgraph_name] = {
-            'yaml': version_details['instructions'],
+            'yaml': yaml_schema,  # Use filtered YAML
            'tools': tools,
            'flattened': False
        }
@@ -43,11 +288,11 @@ class SubgraphToolkit:
         graph = create_graph(
             client=llm,
             tools=tools,
-            yaml_schema=version_details['instructions'],
+            yaml_schema=yaml_schema,  # Use filtered YAML
             debug=False,
             store=None,
             memory=MemorySaver(),
-            # for_subgraph=True, # compile as raw subgraph
+            for_subgraph=is_subgraph,  # Pass flag to create_graph
         )
 
         cleaned_subgraph_name = clean_string(subgraph_name)
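
A hedged usage sketch of the extended SubgraphToolkit.get_toolkit signature follows; the wrapper function name and the client, llm, and ID values are placeholders, not part of the diff.

def build_pipeline_subgraph_tools(alita_client, llm, app_id: int, version_id: int):
    """Hypothetical helper: register a pipeline application as a subgraph with the new flags."""
    from alita_sdk.runtime.toolkits.subgraph import SubgraphToolkit

    return SubgraphToolkit.get_toolkit(
        alita_client,
        application_id=app_id,
        application_version_id=version_id,
        app_api_key=alita_client.auth_token,
        selected_tools=[],
        llm=llm,
        is_subgraph=True,           # default; triggers PrinterNode filtering of the YAML schema
        mcp_tokens=None,            # new optional parameter, forwarded to get_tools()
        ignored_mcp_servers=None,   # new optional parameter, forwarded to get_tools()
    )
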
@@ -9,18 +9,23 @@ from alita_sdk.tools import get_tools as alita_tools
 from .application import ApplicationToolkit
 from .artifact import ArtifactToolkit
 from .datasource import DatasourcesToolkit
+from .planning import PlanningToolkit
 from .prompt import PromptToolkit
 from .subgraph import SubgraphToolkit
 from .vectorstore import VectorStoreToolkit
 from .mcp import McpToolkit
+from .mcp_config import McpConfigToolkit, get_mcp_config_toolkit_schemas
+from .skill_router import SkillRouterToolkit
 from ..tools.mcp_server_tool import McpServerTool
 from ..tools.sandbox import SandboxToolkit
 from ..tools.image_generation import ImageGenerationToolkit
+from ..tools.data_analysis import DataAnalysisToolkit
 # Import community tools
 from ...community import get_toolkits as community_toolkits, get_tools as community_tools
 from ...tools.memory import MemoryToolkit
 from ..utils.mcp_oauth import canonical_resource, McpAuthorizationRequired
-from ...tools.utils import TOOLKIT_SPLITTER
+from ...tools.utils import clean_string
+from alita_sdk.tools import _inject_toolkit_id
 
 logger = logging.getLogger(__name__)
 
@@ -29,22 +34,59 @@ def get_toolkits():
     core_toolkits = [
         ArtifactToolkit.toolkit_config_schema(),
         MemoryToolkit.toolkit_config_schema(),
+        PlanningToolkit.toolkit_config_schema(),
         VectorStoreToolkit.toolkit_config_schema(),
         SandboxToolkit.toolkit_config_schema(),
         ImageGenerationToolkit.toolkit_config_schema(),
-        McpToolkit.toolkit_config_schema()
+        DataAnalysisToolkit.toolkit_config_schema(),
+        McpToolkit.toolkit_config_schema(),
+        McpConfigToolkit.toolkit_config_schema(),
+        SkillRouterToolkit.toolkit_config_schema()
     ]
 
-    return core_toolkits + community_toolkits() + alita_toolkits()
+    # Add configured MCP servers (stdio and http) as available toolkits
+    mcp_config_toolkits = get_mcp_config_toolkit_schemas()
 
+    return core_toolkits + mcp_config_toolkits + community_toolkits() + alita_toolkits()
+
+
+def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseStore = None, debug_mode: Optional[bool] = False, mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None, ignored_mcp_servers: Optional[list] = None) -> list:
+    # Sanitize tools_list to handle corrupted tool configurations
+    sanitized_tools = []
+    for tool in tools_list:
+        if isinstance(tool, dict):
+            # Check for corrupted structure where 'type' and 'name' contain the full tool config
+            if 'type' in tool and isinstance(tool['type'], dict):
+                # This is a corrupted tool - use the inner dict instead
+                logger.warning(f"Detected corrupted tool configuration (type=dict), fixing: {tool}")
+                actual_tool = tool['type']  # or tool['name'], they should be the same
+                sanitized_tools.append(actual_tool)
+            elif 'name' in tool and isinstance(tool['name'], dict):
+                # Another corruption pattern where name contains the full config
+                logger.warning(f"Detected corrupted tool configuration (name=dict), fixing: {tool}")
+                actual_tool = tool['name']
+                sanitized_tools.append(actual_tool)
+            elif 'type' in tool and isinstance(tool['type'], str):
+                # Valid tool configuration
+                sanitized_tools.append(tool)
+            else:
+                # Skip invalid/corrupted tools that can't be fixed
+                logger.warning(f"Skipping invalid tool configuration: {tool}")
+        else:
+            logger.warning(f"Skipping non-dict tool: {tool}")
+            # Skip non-dict tools
 
-def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None, debug_mode: Optional[bool] = False, mcp_tokens: Optional[dict] = None) -> list:
     prompts = []
     tools = []
+    unhandled_tools = []  # Track tools not handled by main processing
 
-    for tool in tools_list:
+    for tool in sanitized_tools:
+        # Flag to track if this tool was processed by the main loop
+        # Used to prevent double processing by fallback systems
+        tool_handled = False
         try:
             if tool['type'] == 'datasource':
+                tool_handled = True
                 tools.extend(DatasourcesToolkit.get_toolkit(
                     alita_client,
                     datasource_ids=[int(tool['settings']['datasource_id'])],
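
The sanitization pass above guards against tool entries whose 'type' or 'name' field holds the whole configuration dict. A self-contained restatement (with made-up toolkit entries, and without the logging) of what it keeps, unwraps, or drops:

valid = {'type': 'artifact', 'settings': {'bucket': 'demo'}}          # kept as-is
nested_in_type = {'type': {'type': 'artifact', 'settings': {}}}       # inner dict is used
nested_in_name = {'name': {'type': 'artifact', 'settings': {}}}       # inner dict is used
broken = {'settings': {}}                                             # skipped with a warning

def sanitize(tools_list):
    """Hedged restatement of the sanitization loop added in this diff (logging omitted)."""
    sanitized = []
    for tool in tools_list:
        if not isinstance(tool, dict):
            continue
        if isinstance(tool.get('type'), dict):
            sanitized.append(tool['type'])
        elif isinstance(tool.get('name'), dict):
            sanitized.append(tool['name'])
        elif isinstance(tool.get('type'), str):
            sanitized.append(tool)
    return sanitized

print(sanitize([valid, nested_in_type, nested_in_name, broken]))      # 3 entries survive
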
@@ -52,24 +94,35 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
                     toolkit_name=tool.get('toolkit_name', '') or tool.get('name', '')
                 ).get_tools())
             elif tool['type'] == 'application':
+                tool_handled = True
+                # Check if this is a pipeline to enable PrinterNode filtering
+                is_pipeline_subgraph = tool.get('agent_type', '') == 'pipeline'
+
                 tools.extend(ApplicationToolkit.get_toolkit(
                     alita_client,
                     application_id=int(tool['settings']['application_id']),
                     application_version_id=int(tool['settings']['application_version_id']),
-                    selected_tools=[]
+                    selected_tools=[],
+                    ignored_mcp_servers=ignored_mcp_servers,
+                    is_subgraph=is_pipeline_subgraph,  # Pass is_subgraph for pipelines
+                    mcp_tokens=mcp_tokens
                 ).get_tools())
-                # backward compatibility for pipeline application type as subgraph node
-                if tool.get('agent_type', '') == 'pipeline':
-                    # static get_toolkit returns a list of CompiledStateGraph stubs
-                    tools.extend(SubgraphToolkit.get_toolkit(
-                        alita_client,
-                        application_id=int(tool['settings']['application_id']),
-                        application_version_id=int(tool['settings']['application_version_id']),
-                        app_api_key=alita_client.auth_token,
-                        selected_tools=[],
-                        llm=llm
-                    ))
+                # TODO: deprecate next release (1/15/2026)
+                # if is_pipeline_subgraph:
+                #     # static get_toolkit returns a list of CompiledStateGraph stubs
+                #     # Pass is_subgraph=True to enable PrinterNode filtering
+                #     logger.info(f"Processing pipeline as subgraph, will filter PrinterNodes")
+                #     tools.extend(SubgraphToolkit.get_toolkit(
+                #         alita_client,
+                #         application_id=int(tool['settings']['application_id']),
+                #         application_version_id=int(tool['settings']['application_version_id']),
+                #         app_api_key=alita_client.auth_token,
+                #         selected_tools=[],
+                #         llm=llm,
+                #         is_subgraph=True  # Enable PrinterNode filtering for pipelines used as subgraphs
+                #     ))
             elif tool['type'] == 'memory':
+                tool_handled = True
                 tools += MemoryToolkit.get_toolkit(
                     namespace=tool['settings'].get('namespace', str(tool['id'])),
                     pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
@@ -77,6 +130,7 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
                 ).get_tools()
             # TODO: update configuration of internal tools
             elif tool['type'] == 'internal_tool':
+                tool_handled = True
                 if tool['name'] == 'pyodide':
                     tools += SandboxToolkit.get_toolkit(
                         stateful=False,
@@ -91,8 +145,28 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
                     else:
                         logger.warning("Image generation internal tool requested "
                                        "but no image generation model configured")
+                elif tool['name'] == 'planner':
+                    tools += PlanningToolkit.get_toolkit(
+                        pgvector_configuration=tool.get('settings', {}).get('pgvector_configuration'),
+                        conversation_id=conversation_id,
+                    ).get_tools()
+                elif tool['name'] == 'data_analysis':
+                    # Data Analysis internal tool - uses conversation attachment bucket
+                    settings = tool.get('settings', {})
+                    bucket_name = settings.get('bucket_name')
+                    if bucket_name:
+                        tools += DataAnalysisToolkit.get_toolkit(
+                            alita_client=alita_client,
+                            llm=llm,
+                            bucket_name=bucket_name,
+                            toolkit_name="Data Analyst",
+                        ).get_tools()
+                    else:
+                        logger.warning("Data Analysis internal tool requested "
+                                       "but no bucket_name provided in settings")
             elif tool['type'] == 'artifact':
-                tools.extend(ArtifactToolkit.get_toolkit(
+                tool_handled = True
+                toolkit_tools = ArtifactToolkit.get_toolkit(
                     client=alita_client,
                     bucket=tool['settings']['bucket'],
                     toolkit_name=tool.get('toolkit_name', ''),
@@ -102,17 +176,72 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
                     pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
                     embedding_model=tool['settings'].get('embedding_model'),
                     collection_name=f"{tool.get('toolkit_name')}",
-                    collection_schema = str(tool['id'])
-                ).get_tools())
+                    collection_schema=str(tool['settings'].get('id', tool.get('id', ''))),
+                ).get_tools()
+                # Inject toolkit_id for artifact tools as well
+                # Pass settings as the tool config since that's where the id field is
+                _inject_toolkit_id(tool['settings'], toolkit_tools)
+                tools.extend(toolkit_tools)
+
             elif tool['type'] == 'vectorstore':
+                tool_handled = True
                 tools.extend(VectorStoreToolkit.get_toolkit(
                     llm=llm,
                     toolkit_name=tool.get('toolkit_name', ''),
                     **tool['settings']).get_tools())
+            elif tool['type'] == 'planning':
+                tool_handled = True
+                # Planning toolkit for multi-step task tracking
+                settings = tool.get('settings', {})
+
+                # Check if local mode is enabled (uses filesystem storage, ignores pgvector)
+                use_local = settings.get('local', False)
+
+                if use_local:
+                    # Local mode - use filesystem storage
+                    logger.info("Planning toolkit using local filesystem storage (local=true)")
+                    pgvector_config = {}
+                else:
+                    # Check if explicit connection_string is provided in pgvector_configuration
+                    explicit_pgvector_config = settings.get('pgvector_configuration', {})
+                    explicit_connstr = explicit_pgvector_config.get('connection_string') if explicit_pgvector_config else None
+
+                    if explicit_connstr:
+                        # Use explicitly provided connection string (overrides project secrets)
+                        logger.info("Using explicit connection_string for planning toolkit")
+                        pgvector_config = explicit_pgvector_config
+                    else:
+                        # Try to fetch pgvector_project_connstr from project secrets
+                        pgvector_connstr = None
+                        if alita_client:
+                            try:
+                                pgvector_connstr = alita_client.unsecret('pgvector_project_connstr')
+                                if pgvector_connstr:
+                                    logger.info("Using pgvector_project_connstr for planning toolkit")
+                            except Exception as e:
+                                logger.debug(f"pgvector_project_connstr not available: {e}")
+
+                        pgvector_config = {'connection_string': pgvector_connstr} if pgvector_connstr else {}
+
+                tools.extend(PlanningToolkit.get_toolkit(
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    selected_tools=settings.get('selected_tools', []),
+                    pgvector_configuration=pgvector_config,
+                    conversation_id=conversation_id or settings.get('conversation_id'),
+                ).get_tools())
             elif tool['type'] == 'mcp':
+                tool_handled = True
                 # remote mcp tool initialization with token injection
                 settings = dict(tool['settings'])
                 url = settings.get('url')
+
+                # Check if this MCP server should be ignored (user chose to continue without auth)
+                if ignored_mcp_servers and url:
+                    canonical_url = canonical_resource(url)
+                    if canonical_url in ignored_mcp_servers or url in ignored_mcp_servers:
+                        logger.info(f"[MCP Auth] Skipping ignored MCP server: {url}")
+                        continue
+
                 headers = settings.get('headers')
                 token_data = None
                 session_id = None
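
For the new 'planning' toolkit type, storage is resolved in this order: local filesystem if settings['local'] is true, otherwise an explicit pgvector connection_string if present, otherwise the pgvector_project_connstr project secret fetched through alita_client.unsecret(). Two hedged example configurations (field names from the diff, values are placeholders):

local_planning = {
    'type': 'planning',
    'toolkit_name': 'planner',
    'settings': {'local': True},                      # filesystem storage, pgvector ignored
}
explicit_planning = {
    'type': 'planning',
    'toolkit_name': 'planner',
    'settings': {
        'pgvector_configuration': {
            # placeholder connection string, overrides the project secret
            'connection_string': 'postgresql+psycopg://user:pass@host:5432/db',
        },
    },
}
# With neither 'local' nor an explicit connection string, get_tools() falls back to the
# 'pgvector_project_connstr' project secret via alita_client.unsecret(), if available.
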
@@ -153,26 +282,96 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
                     toolkit_name=tool.get('toolkit_name', ''),
                     client=alita_client,
                     **settings).get_tools())
+            elif tool['type'] == 'skill_router':
+                tool_handled = True
+                # Skills Registry Router Toolkit
+                logger.info(f"Processing skill_router toolkit: {tool}")
+                try:
+                    settings = tool.get('settings', {})
+                    toolkit_name = tool.get('toolkit_name', '')
+                    selected_tools = settings.get('selected_tools', [])
+
+                    toolkit_tools = SkillRouterToolkit.get_toolkit(
+                        client=alita_client,
+                        llm=llm,
+                        toolkit_name=toolkit_name,
+                        selected_tools=selected_tools,
+                        **settings
+                    ).get_tools()
+
+                    tools.extend(toolkit_tools)
+                    logger.info(f"✅ Successfully added {len(toolkit_tools)} tools from SkillRouterToolkit")
+                except Exception as e:
+                    logger.error(f"❌ Failed to initialize SkillRouterToolkit: {e}")
+                    raise
+            elif tool['type'] == 'mcp_config' or tool['type'].startswith('mcp_'):
+                tool_handled = True
+                # MCP Config toolkit - pre-configured MCP servers (stdio or http)
+                # Handle both explicit 'mcp_config' type and dynamic names like 'mcp_playwright'
+                logger.info(f"Processing mcp_config toolkit: {tool}")
+                try:
+                    settings = tool.get('settings', {})
+
+                    # Server name can come from settings or be extracted from type name
+                    server_name = settings.get('server_name')
+                    if not server_name and tool['type'].startswith('mcp_') and tool['type'] != 'mcp_config':
+                        # Extract server name from type (e.g., 'mcp_playwright' -> 'playwright')
+                        server_name = tool['type'][4:]  # Remove 'mcp_' prefix
+
+                    if not server_name:
+                        logger.error(f"❌ No server_name found for mcp_config toolkit: {tool}")
+                        continue
+
+                    toolkit_name = tool.get('toolkit_name', '') or server_name
+                    selected_tools = settings.get('selected_tools', [])
+                    excluded_tools = settings.get('excluded_tools', [])
+
+                    # Get server config (may be in settings or from global config)
+                    server_config = settings.get('server_config')
+
+                    toolkit_tools = McpConfigToolkit.get_toolkit(
+                        server_name=server_name,
+                        server_config=server_config,
+                        user_config=settings,
+                        selected_tools=selected_tools if selected_tools else None,
+                        excluded_tools=excluded_tools if excluded_tools else None,
+                        toolkit_name=toolkit_name,
+                        client=alita_client,
+                    ).get_tools()
+
+                    tools.extend(toolkit_tools)
+                    logger.info(f"✅ Successfully added {len(toolkit_tools)} tools from McpConfigToolkit ({server_name})")
+                except Exception as e:
+                    logger.error(f"❌ Failed to initialize McpConfigToolkit: {e}")
+                    if not debug_mode:
+                        raise
+        except McpAuthorizationRequired:
+            # Re-raise auth required exceptions directly
+            raise
         except Exception as e:
-            if isinstance(e, McpAuthorizationRequired):
-                raise
             logger.error(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}", exc_info=True)
             if debug_mode:
                 logger.info("Skipping tool initialization error due to debug mode.")
                 continue
             else:
                 raise ToolException(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}")
-
+
+        # Track unhandled tools (make a copy to avoid reference issues)
+        if not tool_handled:
+            # Ensure we only add valid tool configurations to unhandled_tools
+            if isinstance(tool, dict) and 'type' in tool and isinstance(tool['type'], str):
+                unhandled_tools.append(dict(tool))
+
     if len(prompts) > 0:
         tools += PromptToolkit.get_toolkit(alita_client, prompts).get_tools()
-
-    # Add community tools
-    tools += community_tools(tools_list, alita_client, llm)
-    # Add alita tools
-    tools += alita_tools(tools_list, alita_client, llm, memory_store)
+
+    # Add community tools (only for unhandled tools)
+    tools += community_tools(unhandled_tools, alita_client, llm)
+    # Add alita tools (only for unhandled tools)
+    tools += alita_tools(unhandled_tools, alita_client, llm, memory_store)
     # Add MCP tools registered via alita-mcp CLI (static registry)
     # Note: Tools with type='mcp' are already handled in main loop above
-    tools += _mcp_tools(tools_list, alita_client)
+    tools += _mcp_tools(unhandled_tools, alita_client)
 
     # Sanitize tool names to meet OpenAI's function naming requirements
     # tools = _sanitize_tool_names(tools)
@@ -267,11 +466,18 @@ def _mcp_tools(tools_list, alita):
 
 def _init_single_mcp_tool(server_toolkit_name, toolkit_name, available_tool, alita, toolkit_settings):
     try:
-
-        tool_name = f'{toolkit_name}{TOOLKIT_SPLITTER}{available_tool["name"]}'
+        # Use clean tool name without prefix
+        tool_name = available_tool["name"]
+        # Add toolkit context to description (max 1000 chars)
+        toolkit_context = f" [Toolkit: {clean_string(toolkit_name)}]" if toolkit_name else ''
+        base_description = f"MCP for a tool '{tool_name}': {available_tool.get('description', '')}"
+        description = base_description
+        if toolkit_context and len(base_description + toolkit_context) <= 1000:
+            description = base_description + toolkit_context
+
         return McpServerTool(
             name=tool_name,
-            description=f"MCP for a tool '{tool_name}': {available_tool.get("description", "")}",
+            description=description,
             args_schema=McpServerTool.create_pydantic_model_from_schema(
                 available_tool.get("inputSchema", {})
             ),
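
The replaced description logic in _init_single_mcp_tool appends a "[Toolkit: ...]" suffix only while the combined description stays within 1000 characters. A simplified, runnable restatement (the real code also passes the toolkit name through clean_string):

def build_description(tool_name: str, tool_description: str, toolkit_name: str) -> str:
    """Hedged sketch of the new MCP tool description rule."""
    toolkit_context = f" [Toolkit: {toolkit_name}]" if toolkit_name else ''
    base = f"MCP for a tool '{tool_name}': {tool_description}"
    # Only append the toolkit context if the result still fits in 1000 characters.
    if toolkit_context and len(base + toolkit_context) <= 1000:
        return base + toolkit_context
    return base

print(build_description('browse', 'Open a page', 'playwright'))
# MCP for a tool 'browse': Open a page [Toolkit: playwright]
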