alita-sdk 0.3.462__py3-none-any.whl → 0.3.627__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (261)
  1. alita_sdk/cli/agent/__init__.py +5 -0
  2. alita_sdk/cli/agent/default.py +258 -0
  3. alita_sdk/cli/agent_executor.py +15 -3
  4. alita_sdk/cli/agent_loader.py +56 -8
  5. alita_sdk/cli/agent_ui.py +93 -31
  6. alita_sdk/cli/agents.py +2274 -230
  7. alita_sdk/cli/callbacks.py +96 -25
  8. alita_sdk/cli/cli.py +10 -1
  9. alita_sdk/cli/config.py +162 -9
  10. alita_sdk/cli/context/__init__.py +30 -0
  11. alita_sdk/cli/context/cleanup.py +198 -0
  12. alita_sdk/cli/context/manager.py +731 -0
  13. alita_sdk/cli/context/message.py +285 -0
  14. alita_sdk/cli/context/strategies.py +289 -0
  15. alita_sdk/cli/context/token_estimation.py +127 -0
  16. alita_sdk/cli/input_handler.py +419 -0
  17. alita_sdk/cli/inventory.py +1073 -0
  18. alita_sdk/cli/testcases/__init__.py +94 -0
  19. alita_sdk/cli/testcases/data_generation.py +119 -0
  20. alita_sdk/cli/testcases/discovery.py +96 -0
  21. alita_sdk/cli/testcases/executor.py +84 -0
  22. alita_sdk/cli/testcases/logger.py +85 -0
  23. alita_sdk/cli/testcases/parser.py +172 -0
  24. alita_sdk/cli/testcases/prompts.py +91 -0
  25. alita_sdk/cli/testcases/reporting.py +125 -0
  26. alita_sdk/cli/testcases/setup.py +108 -0
  27. alita_sdk/cli/testcases/test_runner.py +282 -0
  28. alita_sdk/cli/testcases/utils.py +39 -0
  29. alita_sdk/cli/testcases/validation.py +90 -0
  30. alita_sdk/cli/testcases/workflow.py +196 -0
  31. alita_sdk/cli/toolkit.py +14 -17
  32. alita_sdk/cli/toolkit_loader.py +35 -5
  33. alita_sdk/cli/tools/__init__.py +36 -2
  34. alita_sdk/cli/tools/approval.py +224 -0
  35. alita_sdk/cli/tools/filesystem.py +910 -64
  36. alita_sdk/cli/tools/planning.py +389 -0
  37. alita_sdk/cli/tools/terminal.py +414 -0
  38. alita_sdk/community/__init__.py +72 -12
  39. alita_sdk/community/inventory/__init__.py +236 -0
  40. alita_sdk/community/inventory/config.py +257 -0
  41. alita_sdk/community/inventory/enrichment.py +2137 -0
  42. alita_sdk/community/inventory/extractors.py +1469 -0
  43. alita_sdk/community/inventory/ingestion.py +3172 -0
  44. alita_sdk/community/inventory/knowledge_graph.py +1457 -0
  45. alita_sdk/community/inventory/parsers/__init__.py +218 -0
  46. alita_sdk/community/inventory/parsers/base.py +295 -0
  47. alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
  48. alita_sdk/community/inventory/parsers/go_parser.py +851 -0
  49. alita_sdk/community/inventory/parsers/html_parser.py +389 -0
  50. alita_sdk/community/inventory/parsers/java_parser.py +593 -0
  51. alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
  52. alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
  53. alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
  54. alita_sdk/community/inventory/parsers/python_parser.py +604 -0
  55. alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
  56. alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
  57. alita_sdk/community/inventory/parsers/text_parser.py +322 -0
  58. alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
  59. alita_sdk/community/inventory/patterns/__init__.py +61 -0
  60. alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
  61. alita_sdk/community/inventory/patterns/loader.py +348 -0
  62. alita_sdk/community/inventory/patterns/registry.py +198 -0
  63. alita_sdk/community/inventory/presets.py +535 -0
  64. alita_sdk/community/inventory/retrieval.py +1403 -0
  65. alita_sdk/community/inventory/toolkit.py +173 -0
  66. alita_sdk/community/inventory/toolkit_utils.py +176 -0
  67. alita_sdk/community/inventory/visualize.py +1370 -0
  68. alita_sdk/configurations/__init__.py +1 -1
  69. alita_sdk/configurations/ado.py +141 -20
  70. alita_sdk/configurations/bitbucket.py +0 -3
  71. alita_sdk/configurations/confluence.py +76 -42
  72. alita_sdk/configurations/figma.py +76 -0
  73. alita_sdk/configurations/gitlab.py +17 -5
  74. alita_sdk/configurations/openapi.py +329 -0
  75. alita_sdk/configurations/qtest.py +72 -1
  76. alita_sdk/configurations/report_portal.py +96 -0
  77. alita_sdk/configurations/sharepoint.py +148 -0
  78. alita_sdk/configurations/testio.py +83 -0
  79. alita_sdk/runtime/clients/artifact.py +3 -3
  80. alita_sdk/runtime/clients/client.py +353 -48
  81. alita_sdk/runtime/clients/sandbox_client.py +0 -21
  82. alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
  83. alita_sdk/runtime/langchain/assistant.py +123 -26
  84. alita_sdk/runtime/langchain/constants.py +642 -1
  85. alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
  86. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
  87. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +6 -3
  88. alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +226 -7
  89. alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
  90. alita_sdk/runtime/langchain/document_loaders/constants.py +12 -7
  91. alita_sdk/runtime/langchain/langraph_agent.py +279 -73
  92. alita_sdk/runtime/langchain/utils.py +82 -15
  93. alita_sdk/runtime/llms/preloaded.py +2 -6
  94. alita_sdk/runtime/skills/__init__.py +91 -0
  95. alita_sdk/runtime/skills/callbacks.py +498 -0
  96. alita_sdk/runtime/skills/discovery.py +540 -0
  97. alita_sdk/runtime/skills/executor.py +610 -0
  98. alita_sdk/runtime/skills/input_builder.py +371 -0
  99. alita_sdk/runtime/skills/models.py +330 -0
  100. alita_sdk/runtime/skills/registry.py +355 -0
  101. alita_sdk/runtime/skills/skill_runner.py +330 -0
  102. alita_sdk/runtime/toolkits/__init__.py +7 -0
  103. alita_sdk/runtime/toolkits/application.py +21 -9
  104. alita_sdk/runtime/toolkits/artifact.py +15 -5
  105. alita_sdk/runtime/toolkits/datasource.py +13 -6
  106. alita_sdk/runtime/toolkits/mcp.py +139 -251
  107. alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
  108. alita_sdk/runtime/toolkits/planning.py +178 -0
  109. alita_sdk/runtime/toolkits/skill_router.py +238 -0
  110. alita_sdk/runtime/toolkits/subgraph.py +251 -6
  111. alita_sdk/runtime/toolkits/tools.py +238 -32
  112. alita_sdk/runtime/toolkits/vectorstore.py +11 -5
  113. alita_sdk/runtime/tools/__init__.py +3 -1
  114. alita_sdk/runtime/tools/application.py +20 -6
  115. alita_sdk/runtime/tools/artifact.py +511 -28
  116. alita_sdk/runtime/tools/data_analysis.py +183 -0
  117. alita_sdk/runtime/tools/function.py +43 -15
  118. alita_sdk/runtime/tools/image_generation.py +50 -44
  119. alita_sdk/runtime/tools/llm.py +852 -67
  120. alita_sdk/runtime/tools/loop.py +3 -1
  121. alita_sdk/runtime/tools/loop_output.py +3 -1
  122. alita_sdk/runtime/tools/mcp_remote_tool.py +25 -10
  123. alita_sdk/runtime/tools/mcp_server_tool.py +7 -6
  124. alita_sdk/runtime/tools/planning/__init__.py +36 -0
  125. alita_sdk/runtime/tools/planning/models.py +246 -0
  126. alita_sdk/runtime/tools/planning/wrapper.py +607 -0
  127. alita_sdk/runtime/tools/router.py +2 -4
  128. alita_sdk/runtime/tools/sandbox.py +9 -6
  129. alita_sdk/runtime/tools/skill_router.py +776 -0
  130. alita_sdk/runtime/tools/tool.py +3 -1
  131. alita_sdk/runtime/tools/vectorstore.py +7 -2
  132. alita_sdk/runtime/tools/vectorstore_base.py +51 -11
  133. alita_sdk/runtime/utils/AlitaCallback.py +137 -21
  134. alita_sdk/runtime/utils/constants.py +5 -1
  135. alita_sdk/runtime/utils/mcp_client.py +492 -0
  136. alita_sdk/runtime/utils/mcp_oauth.py +202 -5
  137. alita_sdk/runtime/utils/mcp_sse_client.py +36 -7
  138. alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
  139. alita_sdk/runtime/utils/serialization.py +155 -0
  140. alita_sdk/runtime/utils/streamlit.py +6 -10
  141. alita_sdk/runtime/utils/toolkit_utils.py +16 -5
  142. alita_sdk/runtime/utils/utils.py +36 -0
  143. alita_sdk/tools/__init__.py +113 -29
  144. alita_sdk/tools/ado/repos/__init__.py +51 -33
  145. alita_sdk/tools/ado/repos/repos_wrapper.py +148 -89
  146. alita_sdk/tools/ado/test_plan/__init__.py +25 -9
  147. alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
  148. alita_sdk/tools/ado/utils.py +1 -18
  149. alita_sdk/tools/ado/wiki/__init__.py +25 -8
  150. alita_sdk/tools/ado/wiki/ado_wrapper.py +291 -22
  151. alita_sdk/tools/ado/work_item/__init__.py +26 -9
  152. alita_sdk/tools/ado/work_item/ado_wrapper.py +56 -3
  153. alita_sdk/tools/advanced_jira_mining/__init__.py +11 -8
  154. alita_sdk/tools/aws/delta_lake/__init__.py +13 -9
  155. alita_sdk/tools/aws/delta_lake/tool.py +5 -1
  156. alita_sdk/tools/azure_ai/search/__init__.py +11 -8
  157. alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
  158. alita_sdk/tools/base/tool.py +5 -1
  159. alita_sdk/tools/base_indexer_toolkit.py +170 -45
  160. alita_sdk/tools/bitbucket/__init__.py +17 -12
  161. alita_sdk/tools/bitbucket/api_wrapper.py +59 -11
  162. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
  163. alita_sdk/tools/browser/__init__.py +5 -4
  164. alita_sdk/tools/carrier/__init__.py +5 -6
  165. alita_sdk/tools/carrier/backend_reports_tool.py +6 -6
  166. alita_sdk/tools/carrier/run_ui_test_tool.py +6 -6
  167. alita_sdk/tools/carrier/ui_reports_tool.py +5 -5
  168. alita_sdk/tools/chunkers/__init__.py +3 -1
  169. alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
  170. alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
  171. alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
  172. alita_sdk/tools/chunkers/universal_chunker.py +270 -0
  173. alita_sdk/tools/cloud/aws/__init__.py +10 -7
  174. alita_sdk/tools/cloud/azure/__init__.py +10 -7
  175. alita_sdk/tools/cloud/gcp/__init__.py +10 -7
  176. alita_sdk/tools/cloud/k8s/__init__.py +10 -7
  177. alita_sdk/tools/code/linter/__init__.py +10 -8
  178. alita_sdk/tools/code/loaders/codesearcher.py +3 -2
  179. alita_sdk/tools/code/sonar/__init__.py +10 -7
  180. alita_sdk/tools/code_indexer_toolkit.py +73 -23
  181. alita_sdk/tools/confluence/__init__.py +21 -15
  182. alita_sdk/tools/confluence/api_wrapper.py +78 -23
  183. alita_sdk/tools/confluence/loader.py +4 -2
  184. alita_sdk/tools/custom_open_api/__init__.py +12 -5
  185. alita_sdk/tools/elastic/__init__.py +11 -8
  186. alita_sdk/tools/elitea_base.py +493 -30
  187. alita_sdk/tools/figma/__init__.py +58 -11
  188. alita_sdk/tools/figma/api_wrapper.py +1235 -143
  189. alita_sdk/tools/figma/figma_client.py +73 -0
  190. alita_sdk/tools/figma/toon_tools.py +2748 -0
  191. alita_sdk/tools/github/__init__.py +13 -14
  192. alita_sdk/tools/github/github_client.py +224 -100
  193. alita_sdk/tools/github/graphql_client_wrapper.py +119 -33
  194. alita_sdk/tools/github/schemas.py +14 -5
  195. alita_sdk/tools/github/tool.py +5 -1
  196. alita_sdk/tools/github/tool_prompts.py +9 -22
  197. alita_sdk/tools/gitlab/__init__.py +15 -11
  198. alita_sdk/tools/gitlab/api_wrapper.py +207 -41
  199. alita_sdk/tools/gitlab_org/__init__.py +10 -8
  200. alita_sdk/tools/gitlab_org/api_wrapper.py +63 -64
  201. alita_sdk/tools/google/bigquery/__init__.py +13 -12
  202. alita_sdk/tools/google/bigquery/tool.py +5 -1
  203. alita_sdk/tools/google_places/__init__.py +10 -8
  204. alita_sdk/tools/google_places/api_wrapper.py +1 -1
  205. alita_sdk/tools/jira/__init__.py +17 -11
  206. alita_sdk/tools/jira/api_wrapper.py +91 -40
  207. alita_sdk/tools/keycloak/__init__.py +11 -8
  208. alita_sdk/tools/localgit/__init__.py +9 -3
  209. alita_sdk/tools/localgit/local_git.py +62 -54
  210. alita_sdk/tools/localgit/tool.py +5 -1
  211. alita_sdk/tools/memory/__init__.py +11 -3
  212. alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
  213. alita_sdk/tools/ocr/__init__.py +11 -8
  214. alita_sdk/tools/openapi/__init__.py +490 -114
  215. alita_sdk/tools/openapi/api_wrapper.py +1368 -0
  216. alita_sdk/tools/openapi/tool.py +20 -0
  217. alita_sdk/tools/pandas/__init__.py +20 -12
  218. alita_sdk/tools/pandas/api_wrapper.py +38 -25
  219. alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
  220. alita_sdk/tools/postman/__init__.py +11 -11
  221. alita_sdk/tools/pptx/__init__.py +10 -9
  222. alita_sdk/tools/pptx/pptx_wrapper.py +1 -1
  223. alita_sdk/tools/qtest/__init__.py +30 -10
  224. alita_sdk/tools/qtest/api_wrapper.py +430 -13
  225. alita_sdk/tools/rally/__init__.py +10 -8
  226. alita_sdk/tools/rally/api_wrapper.py +1 -1
  227. alita_sdk/tools/report_portal/__init__.py +12 -9
  228. alita_sdk/tools/salesforce/__init__.py +10 -9
  229. alita_sdk/tools/servicenow/__init__.py +17 -14
  230. alita_sdk/tools/servicenow/api_wrapper.py +1 -1
  231. alita_sdk/tools/sharepoint/__init__.py +10 -8
  232. alita_sdk/tools/sharepoint/api_wrapper.py +4 -4
  233. alita_sdk/tools/slack/__init__.py +10 -8
  234. alita_sdk/tools/slack/api_wrapper.py +2 -2
  235. alita_sdk/tools/sql/__init__.py +11 -9
  236. alita_sdk/tools/testio/__init__.py +10 -8
  237. alita_sdk/tools/testrail/__init__.py +11 -8
  238. alita_sdk/tools/testrail/api_wrapper.py +1 -1
  239. alita_sdk/tools/utils/__init__.py +9 -4
  240. alita_sdk/tools/utils/content_parser.py +77 -3
  241. alita_sdk/tools/utils/text_operations.py +410 -0
  242. alita_sdk/tools/utils/tool_prompts.py +79 -0
  243. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +17 -13
  244. alita_sdk/tools/xray/__init__.py +12 -9
  245. alita_sdk/tools/yagmail/__init__.py +9 -3
  246. alita_sdk/tools/zephyr/__init__.py +9 -7
  247. alita_sdk/tools/zephyr_enterprise/__init__.py +11 -8
  248. alita_sdk/tools/zephyr_essential/__init__.py +10 -8
  249. alita_sdk/tools/zephyr_essential/api_wrapper.py +30 -13
  250. alita_sdk/tools/zephyr_essential/client.py +2 -2
  251. alita_sdk/tools/zephyr_scale/__init__.py +11 -9
  252. alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
  253. alita_sdk/tools/zephyr_squad/__init__.py +10 -8
  254. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/METADATA +147 -7
  255. alita_sdk-0.3.627.dist-info/RECORD +468 -0
  256. alita_sdk-0.3.627.dist-info/entry_points.txt +2 -0
  257. alita_sdk-0.3.462.dist-info/RECORD +0 -384
  258. alita_sdk-0.3.462.dist-info/entry_points.txt +0 -2
  259. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/WHEEL +0 -0
  260. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/licenses/LICENSE +0 -0
  261. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/top_level.txt +0 -0
@@ -12,6 +12,7 @@ from langchain_core.runnables import Runnable
  from langchain_core.runnables import RunnableConfig
  from langchain_core.tools import BaseTool, ToolException
  from langgraph.channels.ephemeral_value import EphemeralValue
+ from langgraph.errors import GraphRecursionError
  from langgraph.graph import StateGraph
  from langgraph.graph.graph import END, START
  from langgraph.graph.state import CompiledStateGraph
@@ -22,6 +23,7 @@ from langgraph.store.base import BaseStore
  from .constants import PRINTER_NODE_RS, PRINTER, PRINTER_COMPLETED_STATE
  from .mixedAgentRenderes import convert_message_to_json
  from .utils import create_state, propagate_the_input_mapping, safe_format
+ from ..utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META
  from ..tools.function import FunctionTool
  from ..tools.indexer_tool import IndexerNode
  from ..tools.llm import LLMNode
@@ -29,7 +31,7 @@ from ..tools.loop import LoopNode
  from ..tools.loop_output import LoopToolNode
  from ..tools.tool import ToolNode
  from ..utils.evaluate import EvaluateTemplate
- from ..utils.utils import clean_string, TOOLKIT_SPLITTER
+ from ..utils.utils import clean_string
  from ..tools.router import RouterNode

  logger = logging.getLogger(__name__)
@@ -171,12 +173,13 @@ Answer only with step name, no need to add descrip in case none of the steps are
  """

  def __init__(self, client, steps: str, description: str = "", decisional_inputs: Optional[list[str]] = [],
- default_output: str = 'END'):
+ default_output: str = 'END', is_node: bool = False):
  self.client = client
  self.steps = ",".join([clean_string(step) for step in steps])
  self.description = description
  self.decisional_inputs = decisional_inputs
  self.default_output = default_output if default_output != 'END' else END
+ self.is_node = is_node

  def invoke(self, state: Annotated[BaseStore, InjectedStore()], config: Optional[RunnableConfig] = None) -> str:
  additional_info = ""
@@ -186,10 +189,10 @@ Answer only with step name, no need to add descrip in case none of the steps are
  decision_input = state.get('messages', [])[:]
  else:
  if len(additional_info) == 0:
- additional_info = """### Additoinal info: """
+ additional_info = """### Additional info: """
  additional_info += "{field}: {value}\n".format(field=field, value=state.get(field, ""))
  decision_input.append(HumanMessage(
- self.prompt.format(steps=self.steps, description=self.description, additional_info=additional_info)))
+ self.prompt.format(steps=self.steps, description=safe_format(self.description, state), additional_info=additional_info)))
  completion = self.client.invoke(decision_input)
  result = clean_string(completion.content.strip())
  logger.info(f"Plan to transition to: {result}")
@@ -198,7 +201,8 @@ Answer only with step name, no need to add descrip in case none of the steps are
  dispatch_custom_event(
  "on_decision_edge", {"decisional_inputs": self.decisional_inputs, "state": state}, config=config
  )
- return result
+ # support of legacy `decision` as part of node
+ return {"router_output": result} if self.is_node else result


  class TransitionalEdge(Runnable):
@@ -226,18 +230,32 @@ class StateDefaultNode(Runnable):
  for key, value in self.default_vars.items():
  if isinstance(value, dict) and 'value' in value:
  temp_value = value['value']
- try:
- result[key] = ast.literal_eval(temp_value)
- except:
- logger.debug("Unable to evaluate value, using as is")
+ declared_type = value.get('type', '').lower()
+
+ # If the declared type is 'str' or 'string', preserve the string value
+ # Don't auto-convert even if it looks like a valid Python literal
+ if declared_type in ('str', 'string'):
  result[key] = temp_value
+ else:
+ # For other types, try to evaluate as Python literal
+ try:
+ result[key] = ast.literal_eval(temp_value)
+ except:
+ logger.debug("Unable to evaluate value, using as is")
+ result[key] = temp_value
  return result

  class PrinterNode(Runnable):
  name = "PrinterNode"
+ DEFAULT_FINAL_MSG = "How to proceed? To resume the pipeline - type anything..."

- def __init__(self, input_mapping: Optional[dict[str, dict]]):
+ def __init__(self, input_mapping: Optional[dict[str, dict]], final_message: Optional[str] = None):
  self.input_mapping = input_mapping
+ # Apply fallback logic for empty/None values
+ if final_message and final_message.strip():
+ self.final_message = final_message.strip()
+ else:
+ self.final_message = self.DEFAULT_FINAL_MSG

  def invoke(self, state: BaseStore, config: Optional[RunnableConfig] = None) -> dict:
  logger.info(f"Printer Node - Current state variables: {state}")
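The type-aware default handling added above can be tried in isolation. The following is a minimal sketch (not part of the diff) of the same rule, with invented state-variable definitions; the real node also keeps its bare except and logging:

```python
import ast

def resolve_default(var: dict):
    """Illustrative re-statement of the StateDefaultNode rule shown in the hunk above."""
    temp_value = var['value']
    declared_type = var.get('type', '').lower()
    if declared_type in ('str', 'string'):
        return temp_value                    # declared strings are never auto-converted
    try:
        return ast.literal_eval(temp_value)  # "123" -> 123, "[1, 2]" -> [1, 2]
    except (ValueError, SyntaxError):
        return temp_value                    # not a Python literal, keep as-is

print(resolve_default({'value': '123', 'type': 'str'}))  # '123' stays a string
print(resolve_default({'value': '123', 'type': 'int'}))  # 123 becomes an int
print(resolve_default({'value': 'plain text'}))          # left untouched
```

In short, defaults declared as strings (IDs, version labels, zero-padded numbers) now survive untouched, where previously "123" would silently become an integer.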
@@ -254,7 +272,10 @@ class PrinterNode(Runnable):
  formatted_output = mapping[PRINTER]
  # add info label to the printer's output
  if not formatted_output == PRINTER_COMPLETED_STATE:
- formatted_output += f"\n\n-----\n*How to proceed?*\n* *to resume the pipeline - type anything...*"
+ # convert formatted output to string if it's not
+ if not isinstance(formatted_output, str):
+ formatted_output = str(formatted_output)
+ formatted_output += f"\n\n-----\n*{self.final_message}*"
  logger.debug(f"Formatted output: {formatted_output}")
  result[PRINTER_NODE_RS] = formatted_output
  return result
@@ -441,6 +462,50 @@ def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_befo
  return compiled


+ def find_tool_by_name_or_metadata(tools: list, tool_name: str, toolkit_name: Optional[str] = None) -> Optional[BaseTool]:
+ """
+ Find a tool by name or by matching metadata (toolkit_name + tool_name).
+
+ For toolkit nodes with toolkit_name specified, this function checks:
+ 1. Metadata match first (toolkit_name + tool_name) - PRIORITY when toolkit_name is provided
+ 2. Direct tool name match (backward compatibility fallback)
+
+ For toolkit nodes without toolkit_name, or other node types:
+ 1. Direct tool name match
+
+ Args:
+ tools: List of available tools
+ tool_name: The tool name to search for
+ toolkit_name: Optional toolkit name for metadata matching
+
+ Returns:
+ The matching tool or None if not found
+ """
+ # When toolkit_name is specified, prioritize metadata matching
+ if toolkit_name:
+ for tool in tools:
+ # Check metadata match first
+ if hasattr(tool, 'metadata') and tool.metadata:
+ metadata_toolkit_name = tool.metadata.get(TOOLKIT_NAME_META)
+ metadata_tool_name = tool.metadata.get(TOOL_NAME_META)
+
+ # Match if both toolkit_name and tool_name in metadata match
+ if metadata_toolkit_name == toolkit_name and metadata_tool_name == tool_name:
+ return tool
+
+ # Fallback to direct name match for backward compatibility
+ for tool in tools:
+ if tool.name == tool_name:
+ return tool
+ else:
+ # No toolkit_name specified, use direct name match only
+ for tool in tools:
+ if tool.name == tool_name:
+ return tool
+
+ return None
+
+
  def create_graph(
  client: Any,
  yaml_schema: str,
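As an illustration of the matching priority implemented by find_tool_by_name_or_metadata above (not part of the diff), here is a hedged sketch; the toolkit labels, duplicate tool names, and metadata key strings are invented for the example, and stand-in objects are used instead of real BaseTool instances:

```python
from types import SimpleNamespace

# Stand-ins for the SDK's metadata keys; the real values come from
# alita_sdk.runtime.utils.constants (TOOLKIT_NAME_META / TOOL_NAME_META).
TOOLKIT_NAME_META, TOOL_NAME_META = "toolkit_name", "tool_name"

def find_tool(tools, tool_name, toolkit_name=None):
    # Same priority as the diffed helper: metadata match first when a
    # toolkit_name is given, then plain name match as a fallback.
    if toolkit_name:
        for tool in tools:
            meta = getattr(tool, "metadata", None) or {}
            if meta.get(TOOLKIT_NAME_META) == toolkit_name and meta.get(TOOL_NAME_META) == tool_name:
                return tool
    return next((t for t in tools if t.name == tool_name), None)

# Two toolkits exposing a tool with the same clean (unprefixed) name.
tools = [
    SimpleNamespace(name="search", metadata={TOOLKIT_NAME_META: "confluence", TOOL_NAME_META: "search"}),
    SimpleNamespace(name="search", metadata={TOOLKIT_NAME_META: "jira", TOOL_NAME_META: "search"}),
]
assert find_tool(tools, "search", toolkit_name="jira") is tools[1]  # metadata disambiguates
assert find_tool(tools, "search") is tools[0]                       # first direct name match wins
```

This matters because the release drops the old TOOLKIT_SPLITTER prefixing of tool names, so two toolkits can now legitimately expose the same short tool name.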
@@ -454,13 +519,25 @@
  ):
  """ Create a message graph from a yaml schema """

+ # TODO: deprecate next release (1/15/2026)
  # For top-level graphs (not subgraphs), detect and flatten any subgraphs
- if not for_subgraph:
- flattened_yaml, additional_tools = detect_and_flatten_subgraphs(yaml_schema)
- # Add collected tools from subgraphs to the tools list
- tools = list(tools) + additional_tools
- # Use the flattened YAML for building the graph
- yaml_schema = flattened_yaml
+ # if not for_subgraph:
+ # flattened_yaml, additional_tools = detect_and_flatten_subgraphs(yaml_schema)
+ # # Add collected tools from subgraphs to the tools list
+ # tools = list(tools) + additional_tools
+ # # Use the flattened YAML for building the graph
+ # yaml_schema = flattened_yaml
+ # else:
+ # # For subgraphs, filter out PrinterNodes from YAML
+ # from ..toolkits.subgraph import _filter_printer_nodes_from_yaml
+ # yaml_schema = _filter_printer_nodes_from_yaml(yaml_schema)
+ # logger.info("Filtered PrinterNodes from subgraph YAML in create_graph")
+
+ if for_subgraph:
+ # Sanitization for sub-graphs
+ from ..toolkits.subgraph import _filter_printer_nodes_from_yaml
+ yaml_schema = _filter_printer_nodes_from_yaml(yaml_schema)
+ logger.info("Filtered PrinterNodes from subgraph YAML in create_graph")

  schema = yaml.safe_load(yaml_schema)
  logger.debug(f"Schema: {schema}")
@@ -476,20 +553,37 @@
  node_type = node.get('type', 'function')
  node_id = clean_string(node['id'])
  toolkit_name = node.get('toolkit_name')
- tool_name = clean_string(node.get('tool', node_id))
- if toolkit_name:
- tool_name = f"{clean_string(toolkit_name)}{TOOLKIT_SPLITTER}{tool_name}"
+ tool_name = clean_string(node.get('tool', ''))
+ # Tool names are now clean (no prefix needed)
  logger.info(f"Node: {node_id} : {node_type} - {tool_name}")
  if node_type in ['function', 'toolkit', 'mcp', 'tool', 'loop', 'loop_from_tool', 'indexer', 'subgraph', 'pipeline', 'agent']:
- if node_type == 'mcp' and tool_name not in [tool.name for tool in tools]:
- # MCP is not connected and node cannot be added
- raise ToolException(f"MCP tool '{tool_name}' not found in the provided tools. "
- f"Make sure it is connected properly. Available tools: {[tool.name for tool in tools]}")
- for tool in tools:
- if tool.name == tool_name:
+ if node_type in ['mcp', 'toolkit', 'agent'] and not tool_name:
+ # tool is not specified
+ raise ToolException(f"Tool name is required for {node_type} node with id '{node_id}'")
+
+ # Unified validation and tool finding for toolkit, mcp, and agent node types
+ matching_tool = None
+ if node_type in ['toolkit', 'mcp', 'agent']:
+ # Use enhanced validation that checks both direct name and metadata
+ matching_tool = find_tool_by_name_or_metadata(tools, tool_name, toolkit_name)
+ if not matching_tool:
+ # tool is not found in the provided tools
+ error_msg = f"Node `{node_id}` with type `{node_type}` has tool '{tool_name}'"
+ if toolkit_name:
+ error_msg += f" (toolkit: '{toolkit_name}')"
+ error_msg += f" which is not found in the provided tools. Make sure it is connected properly. Available tools: {format_tools(tools)}"
+ raise ToolException(error_msg)
+ else:
+ # For other node types, find tool by direct name match
+ for tool in tools:
+ if tool.name == tool_name:
+ matching_tool = tool
+ break
+
+ if matching_tool:
  if node_type in ['function', 'toolkit', 'mcp']:
  lg_builder.add_node(node_id, FunctionTool(
- tool=tool, name=node_id, return_type='dict',
+ tool=matching_tool, name=node_id, return_type='dict',
  output_variables=node.get('output', []),
  input_mapping=node.get('input_mapping',
  {'messages': {'type': 'variable', 'value': 'messages'}}),
@@ -500,7 +594,7 @@
  {'messages': {'type': 'variable', 'value': 'messages'}})
  output_vars = node.get('output', [])
  lg_builder.add_node(node_id, FunctionTool(
- client=client, tool=tool,
+ client=client, tool=matching_tool,
  name=node_id, return_type='str',
  output_variables=output_vars + ['messages'] if 'messages' not in output_vars else output_vars,
  input_variables=input_params,
@@ -508,15 +602,15 @@
  ))
  elif node_type == 'subgraph' or node_type == 'pipeline':
  # assign parent memory/store
- # tool.checkpointer = memory
- # tool.store = store
+ # matching_tool.checkpointer = memory
+ # matching_tool.store = store
  # wrap with mappings
  pipeline_name = node.get('tool', None)
  if not pipeline_name:
  raise ValueError(
  "Subgraph must have a 'tool' node: add required tool to the subgraph node")
  node_fn = SubgraphRunnable(
- inner=tool.graph,
+ inner=matching_tool.graph,
  name=pipeline_name,
  input_mapping=node.get('input_mapping', {}),
  output_mapping=node.get('output_mapping', {}),
@@ -525,7 +619,7 @@
  break # skip legacy handling
  elif node_type == 'tool':
  lg_builder.add_node(node_id, ToolNode(
- client=client, tool=tool,
+ client=client, tool=matching_tool,
  name=node_id, return_type='dict',
  output_variables=node.get('output', []),
  input_variables=node.get('input', ['messages']),
@@ -534,7 +628,7 @@
  ))
  elif node_type == 'loop':
  lg_builder.add_node(node_id, LoopNode(
- client=client, tool=tool,
+ client=client, tool=matching_tool,
  name=node_id, return_type='dict',
  output_variables=node.get('output', []),
  input_variables=node.get('input', ['messages']),
@@ -544,15 +638,15 @@
  loop_toolkit_name = node.get('loop_toolkit_name')
  loop_tool_name = node.get('loop_tool')
  if (loop_toolkit_name and loop_tool_name) or loop_tool_name:
- loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(
- loop_tool_name)
+ # Use clean tool name (no prefix)
+ loop_tool_name = clean_string(loop_tool_name)
  for t in tools:
  if t.name == loop_tool_name:
  logger.debug(f"Loop tool discovered: {t}")
  lg_builder.add_node(node_id, LoopToolNode(
  client=client,
  name=node_id, return_type='dict',
- tool=tool, loop_tool=t,
+ tool=matching_tool, loop_tool=t,
  variables_mapping=node.get('variables_mapping', {}),
  output_variables=node.get('output', []),
  input_variables=node.get('input', ['messages']),
@@ -568,7 +662,7 @@
  indexer_tool = t
  logger.info(f"Indexer tool: {indexer_tool}")
  lg_builder.add_node(node_id, IndexerNode(
- client=client, tool=tool,
+ client=client, tool=matching_tool,
  index_tool=indexer_tool,
  input_mapping=node.get('input_mapping', {}),
  name=node_id, return_type='dict',
@@ -577,7 +671,6 @@
  output_variables=node.get('output', []),
  input_variables=node.get('input', ['messages']),
  structured_output=node.get('structured_output', False)))
- break
  elif node_type == 'code':
  from ..tools.sandbox import create_sandbox_tool
  sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True,
@@ -603,10 +696,10 @@
  tool_names = []
  if isinstance(connected_tools, dict):
  for toolkit, selected_tools in connected_tools.items():
- for tool in selected_tools:
- tool_names.append(f"{toolkit}{TOOLKIT_SPLITTER}{tool}")
+ # Add tool names directly (no prefix)
+ tool_names.extend(selected_tools)
  elif isinstance(connected_tools, list):
- # for cases when tools are provided as a list of names with already bound toolkit_name
+ # Use provided tool names as-is
  tool_names = connected_tools

  if tool_names:
@@ -629,19 +722,34 @@
  output_variables=output_vars,
  input_variables=node.get('input', ['messages']),
  structured_output=node.get('structured_output', False),
+ tool_execution_timeout=node.get('tool_execution_timeout', 900),
  available_tools=available_tools,
  tool_names=tool_names,
  steps_limit=kwargs.get('steps_limit', 25)
  ))
- elif node_type == 'router':
- # Add a RouterNode as an independent node
- lg_builder.add_node(node_id, RouterNode(
- name=node_id,
- condition=node.get('condition', ''),
- routes=node.get('routes', []),
- default_output=node.get('default_output', 'END'),
- input_variables=node.get('input', ['messages'])
- ))
+ elif node_type in ['router', 'decision']:
+ if node_type == 'router':
+ # Add a RouterNode as an independent node
+ lg_builder.add_node(node_id, RouterNode(
+ name=node_id,
+ condition=node.get('condition', ''),
+ routes=node.get('routes', []),
+ default_output=node.get('default_output', 'END'),
+ input_variables=node.get('input', ['messages'])
+ ))
+ elif node_type == 'decision':
+ logger.info(f'Adding decision: {node["nodes"]}')
+ # fallback to old-style decision node
+ decisional_inputs = node.get('decisional_inputs')
+ decisional_inputs = node.get('input', ['messages']) if not decisional_inputs else decisional_inputs
+ lg_builder.add_node(node_id, DecisionEdge(
+ client, node['nodes'],
+ node.get('description', ""),
+ decisional_inputs=decisional_inputs,
+ default_output=node.get('default_output', 'END'),
+ is_node=True
+ ))
+
  # Add a single conditional edge for all routes
  lg_builder.add_conditional_edges(
  node_id,
@@ -663,6 +771,7 @@
  elif node_type == 'printer':
  lg_builder.add_node(node_id, PrinterNode(
  input_mapping=node.get('input_mapping', {'printer': {'type': 'fixed', 'value': ''}}),
+ final_message=node.get('final_message'),
  ))

  # add interrupts after printer node if specified
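For context, the node entries consumed by the 'decision' and 'printer' branches above look roughly like the following. This is a hedged sketch in Python dict form (field names are taken from the node.get(...) calls in the diff; all values are invented):

```python
# Hypothetical entries from a parsed pipeline schema (the dicts yaml.safe_load would produce).
decision_node = {
    "id": "triage",
    "type": "decision",                     # legacy decision, handled via DecisionEdge(is_node=True)
    "nodes": ["bug_flow", "feature_flow"],  # candidate next steps
    "description": "Pick the flow that matches the user's request",
    "decisional_inputs": ["messages"],      # falls back to 'input' when omitted
    "default_output": "END",
}

printer_node = {
    "id": "show_plan",
    "type": "printer",
    "input_mapping": {"printer": {"type": "variable", "value": "plan"}},
    "final_message": "Review the plan, then type anything to resume.",  # optional; default text used when empty
}
```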
@@ -732,8 +841,20 @@
  debug=debug,
  )
  except ValueError as e:
- raise ValueError(
- f"Validation of the schema failed. {e}\n\nDEBUG INFO:**Schema Nodes:**\n\n{lg_builder.nodes}\n\n**Schema Enges:**\n\n{lg_builder.edges}\n\n**Tools Available:**\n\n{tools}")
+ # Build a clearer debug message without complex f-string expressions
+ debug_nodes = "\n*".join(lg_builder.nodes.keys()) if lg_builder and lg_builder.nodes else ""
+ debug_message = (
+ "Validation of the schema failed. {err}\n\n"
+ "DEBUG INFO:**Schema Nodes:**\n\n*{nodes}\n\n"
+ "**Schema Edges:**\n\n{edges}\n\n"
+ "**Tools Available:**\n\n{tools}"
+ ).format(
+ err=e,
+ nodes=debug_nodes,
+ edges=lg_builder.edges if lg_builder else {},
+ tools=format_tools(tools),
+ )
+ raise ValueError(debug_message)
  # If building a nested subgraph, return the raw CompiledStateGraph
  if for_subgraph:
  return graph
@@ -747,6 +868,14 @@
  )
  return compiled.validate()

+ def format_tools(tools_list: list) -> str:
+ """Format a list of tool names into a comma-separated string."""
+ try:
+ return ', '.join([tool.name for tool in tools_list])
+ except Exception as e:
+ logger.warning(f"Failed to format tools list: {e}")
+ return str(tools_list)
+
  def set_defaults(d):
  """Set default values for dictionary entries based on their type."""
  type_defaults = {
@@ -817,11 +946,19 @@ class LangGraphAgentRunnable(CompiledStateGraph):
  if not config.get("configurable", {}).get("thread_id", ""):
  config["configurable"] = {"thread_id": str(uuid4())}
  thread_id = config.get("configurable", {}).get("thread_id")
+
+ # Check if checkpoint exists early for chat_history handling
+ checkpoint_exists = self.checkpointer and self.checkpointer.get_tuple(config)
+
  # Handle chat history and current input properly
  if input.get('chat_history') and not input.get('messages'):
- # Convert chat history dict messages to LangChain message objects
- chat_history = input.pop('chat_history')
- input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
+ if checkpoint_exists:
+ # Checkpoint already has conversation history - discard redundant chat_history
+ input.pop('chat_history', None)
+ else:
+ # No checkpoint - convert chat history dict messages to LangChain message objects
+ chat_history = input.pop('chat_history')
+ input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]

  # handler for LLM node: if no input (Chat perspective), then take last human message
  # Track if input came from messages to handle content extraction properly
@@ -869,6 +1006,16 @@ class LangGraphAgentRunnable(CompiledStateGraph):
  else:
  # All content was text, remove this message from the list
  input['messages'] = [msg for msg in input['messages'] if msg is not current_message]
+ else:
+ # Message came from input['input'], not from input['messages']
+ # If there are non-text parts (images, etc.), preserve them in messages
+ if non_text_parts:
+ # Initialize messages if it doesn't exist or is empty
+ if not input.get('messages'):
+ input['messages'] = []
+ # Create a new message with only non-text content
+ non_text_message = HumanMessage(content=non_text_parts)
+ input['messages'].append(non_text_message)

  elif isinstance(current_content, str):
  # on regenerate case
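The non-text-part preservation above operates on LangChain's list-of-parts message content. A hedged sketch of the shapes involved (the URL and part layout are invented for illustration, and the exact part format the SDK receives is an assumption):

```python
from langchain_core.messages import HumanMessage

# Hypothetical multimodal payload: one text part plus one image part.
parts = [
    {"type": "text", "text": "Describe this screenshot"},
    {"type": "image_url", "image_url": {"url": "https://example.com/shot.png"}},
]

text = " ".join(p["text"] for p in parts if p.get("type") == "text")
non_text_parts = [p for p in parts if p.get("type") != "text"]

# Mirrors the branch above: the text drives input['input'], while the image
# parts are kept as a separate HumanMessage so downstream nodes still see them.
input_payload = {"input": text, "messages": [HumanMessage(content=non_text_parts)]}
```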
@@ -902,40 +1049,99 @@ class LangGraphAgentRunnable(CompiledStateGraph):
  "with no accompanying text."
  )

- logging.info(f"Input: {thread_id} - {input}")
- if self.checkpointer and self.checkpointer.get_tuple(config):
- self.update_state(config, input)
- if config.pop("should_continue", False):
- invoke_input = input
+ logger.info(f"Input: {thread_id} - {input}")
+ try:
+ if self.checkpointer and self.checkpointer.get_tuple(config):
+ if config.pop("should_continue", False):
+ invoke_input = input
+ else:
+ self.update_state(config, input)
+ invoke_input = None
+ result = super().invoke(invoke_input, config=config, *args, **kwargs)
  else:
- invoke_input = None
- result = super().invoke(invoke_input, config=config, *args, **kwargs)
- else:
- result = super().invoke(input, config=config, *args, **kwargs)
+ result = super().invoke(input, config=config, *args, **kwargs)
+ except GraphRecursionError as e:
+ current_recursion_limit = config.get("recursion_limit", 0)
+ logger.warning("ToolExecutionLimitReached caught in LangGraphAgentRunnable: %s", e)
+ return self._handle_graph_recursion_error(
+ config=config,
+ thread_id=thread_id,
+ current_recursion_limit=current_recursion_limit,
+ )
+
  try:
- if result.get(PRINTER_NODE_RS) == PRINTER_COMPLETED_STATE:
- output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
- result['messages'][-1].content)
+ # Check if printer node output exists
+ printer_output = result.get(PRINTER_NODE_RS)
+ if printer_output == PRINTER_COMPLETED_STATE:
+ # Printer completed, extract last AI message
+ messages = result['messages']
+ output = next(
+ (msg.content for msg in reversed(messages)
+ if not isinstance(msg, HumanMessage)),
+ messages[-1].content
+ ) if messages else result.get('output')
+ elif printer_output is not None:
+ # Printer node has output (interrupted state)
+ output = printer_output
  else:
- # used for printer node output - it will be reset by next `reset` node
- output = result.get(PRINTER_NODE_RS)
- except:
- output = list(result.values())[-1]
+ # No printer node, extract last AI message from messages
+ messages = result.get('messages', [])
+ output = next(
+ (msg.content for msg in reversed(messages)
+ if not isinstance(msg, HumanMessage)),
+ None
+ )
+ except Exception:
+ # Fallback: try to get last value or last message
+ output = str(list(result.values())[-1]) if result else 'Output is undefined'
  config_state = self.get_state(config)
  is_execution_finished = not config_state.next
  if is_execution_finished:
  thread_id = None

+ final_output = f"Assistant run has been completed, but output is None.\nAdding last message if any: {messages[-1] if messages else []}" if is_execution_finished and output is None else output
+
  result_with_state = {
- "output": output,
+ "output": final_output,
  "thread_id": thread_id,
  "execution_finished": is_execution_finished
  }

  # Include all state values in the result
  if hasattr(config_state, 'values') and config_state.values:
+ # except of key = 'output' which is already included
+ for key, value in config_state.values.items():
+ if key != 'output':
+ result_with_state[key] = value
+
+ return result_with_state
+
+ def _handle_graph_recursion_error(
+ self,
+ config: RunnableConfig,
+ thread_id: str,
+ current_recursion_limit: int,
+ ) -> dict:
+ """Handle GraphRecursionError by returning a soft-boundary response."""
+ config_state = self.get_state(config)
+ is_execution_finished = False
+
+ friendly_output = (
+ f"Tool step limit {current_recursion_limit} reached for this run. You can continue by sending another "
+ "message or refining your request."
+ )
+
+ result_with_state: dict[str, Any] = {
+ "output": friendly_output,
+ "thread_id": thread_id,
+ "execution_finished": is_execution_finished,
+ "tool_execution_limit_reached": True,
+ }
+
+ if hasattr(config_state, "values") and config_state.values:
  for key, value in config_state.values.items():
- result_with_state[key] = value
+ if key != "output":
+ result_with_state[key] = value

  return result_with_state
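From a caller's point of view, the soft-boundary behaviour added above surfaces as extra keys in the result dict. A hedged usage sketch, assuming `agent` is a LangGraphAgentRunnable produced by create_graph with checkpointing enabled (the prompts are invented; the key names come from the diff above):

```python
from uuid import uuid4

# Illustrative only: `agent` is assumed to be built elsewhere via create_graph.
config = {"configurable": {"thread_id": str(uuid4())}, "recursion_limit": 50}
result = agent.invoke({"input": "Summarize the open tickets"}, config=config)

if result.get("tool_execution_limit_reached"):
    # The run hit the graph recursion limit: execution_finished is False and the
    # thread_id is preserved, so the same thread can be nudged to continue
    # instead of raising GraphRecursionError to the caller.
    result = agent.invoke({"input": "Please continue"}, config=config)

print(result["output"], result["execution_finished"], result["thread_id"])
```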