alita-sdk 0.3.351__py3-none-any.whl → 0.3.499__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (206) hide show
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent/__init__.py +5 -0
  4. alita_sdk/cli/agent/default.py +258 -0
  5. alita_sdk/cli/agent_executor.py +155 -0
  6. alita_sdk/cli/agent_loader.py +215 -0
  7. alita_sdk/cli/agent_ui.py +228 -0
  8. alita_sdk/cli/agents.py +3601 -0
  9. alita_sdk/cli/callbacks.py +647 -0
  10. alita_sdk/cli/cli.py +168 -0
  11. alita_sdk/cli/config.py +306 -0
  12. alita_sdk/cli/context/__init__.py +30 -0
  13. alita_sdk/cli/context/cleanup.py +198 -0
  14. alita_sdk/cli/context/manager.py +731 -0
  15. alita_sdk/cli/context/message.py +285 -0
  16. alita_sdk/cli/context/strategies.py +289 -0
  17. alita_sdk/cli/context/token_estimation.py +127 -0
  18. alita_sdk/cli/formatting.py +182 -0
  19. alita_sdk/cli/input_handler.py +419 -0
  20. alita_sdk/cli/inventory.py +1256 -0
  21. alita_sdk/cli/mcp_loader.py +315 -0
  22. alita_sdk/cli/toolkit.py +327 -0
  23. alita_sdk/cli/toolkit_loader.py +85 -0
  24. alita_sdk/cli/tools/__init__.py +43 -0
  25. alita_sdk/cli/tools/approval.py +224 -0
  26. alita_sdk/cli/tools/filesystem.py +1751 -0
  27. alita_sdk/cli/tools/planning.py +389 -0
  28. alita_sdk/cli/tools/terminal.py +414 -0
  29. alita_sdk/community/__init__.py +64 -8
  30. alita_sdk/community/inventory/__init__.py +224 -0
  31. alita_sdk/community/inventory/config.py +257 -0
  32. alita_sdk/community/inventory/enrichment.py +2137 -0
  33. alita_sdk/community/inventory/extractors.py +1469 -0
  34. alita_sdk/community/inventory/ingestion.py +3172 -0
  35. alita_sdk/community/inventory/knowledge_graph.py +1457 -0
  36. alita_sdk/community/inventory/parsers/__init__.py +218 -0
  37. alita_sdk/community/inventory/parsers/base.py +295 -0
  38. alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
  39. alita_sdk/community/inventory/parsers/go_parser.py +851 -0
  40. alita_sdk/community/inventory/parsers/html_parser.py +389 -0
  41. alita_sdk/community/inventory/parsers/java_parser.py +593 -0
  42. alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
  43. alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
  44. alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
  45. alita_sdk/community/inventory/parsers/python_parser.py +604 -0
  46. alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
  47. alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
  48. alita_sdk/community/inventory/parsers/text_parser.py +322 -0
  49. alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
  50. alita_sdk/community/inventory/patterns/__init__.py +61 -0
  51. alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
  52. alita_sdk/community/inventory/patterns/loader.py +348 -0
  53. alita_sdk/community/inventory/patterns/registry.py +198 -0
  54. alita_sdk/community/inventory/presets.py +535 -0
  55. alita_sdk/community/inventory/retrieval.py +1403 -0
  56. alita_sdk/community/inventory/toolkit.py +173 -0
  57. alita_sdk/community/inventory/visualize.py +1370 -0
  58. alita_sdk/configurations/bitbucket.py +94 -2
  59. alita_sdk/configurations/confluence.py +96 -1
  60. alita_sdk/configurations/gitlab.py +79 -0
  61. alita_sdk/configurations/jira.py +103 -0
  62. alita_sdk/configurations/testrail.py +88 -0
  63. alita_sdk/configurations/xray.py +93 -0
  64. alita_sdk/configurations/zephyr_enterprise.py +93 -0
  65. alita_sdk/configurations/zephyr_essential.py +75 -0
  66. alita_sdk/runtime/clients/artifact.py +1 -1
  67. alita_sdk/runtime/clients/client.py +214 -42
  68. alita_sdk/runtime/clients/mcp_discovery.py +342 -0
  69. alita_sdk/runtime/clients/mcp_manager.py +262 -0
  70. alita_sdk/runtime/clients/sandbox_client.py +373 -0
  71. alita_sdk/runtime/langchain/assistant.py +118 -30
  72. alita_sdk/runtime/langchain/constants.py +8 -1
  73. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  74. alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
  75. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
  76. alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +41 -12
  77. alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -1
  78. alita_sdk/runtime/langchain/document_loaders/constants.py +116 -99
  79. alita_sdk/runtime/langchain/interfaces/llm_processor.py +2 -2
  80. alita_sdk/runtime/langchain/langraph_agent.py +307 -71
  81. alita_sdk/runtime/langchain/utils.py +48 -8
  82. alita_sdk/runtime/llms/preloaded.py +2 -6
  83. alita_sdk/runtime/models/mcp_models.py +61 -0
  84. alita_sdk/runtime/toolkits/__init__.py +26 -0
  85. alita_sdk/runtime/toolkits/application.py +9 -2
  86. alita_sdk/runtime/toolkits/artifact.py +18 -6
  87. alita_sdk/runtime/toolkits/datasource.py +13 -6
  88. alita_sdk/runtime/toolkits/mcp.py +780 -0
  89. alita_sdk/runtime/toolkits/planning.py +178 -0
  90. alita_sdk/runtime/toolkits/tools.py +205 -55
  91. alita_sdk/runtime/toolkits/vectorstore.py +9 -4
  92. alita_sdk/runtime/tools/__init__.py +11 -3
  93. alita_sdk/runtime/tools/application.py +7 -0
  94. alita_sdk/runtime/tools/artifact.py +225 -12
  95. alita_sdk/runtime/tools/function.py +95 -5
  96. alita_sdk/runtime/tools/graph.py +10 -4
  97. alita_sdk/runtime/tools/image_generation.py +212 -0
  98. alita_sdk/runtime/tools/llm.py +494 -102
  99. alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
  100. alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
  101. alita_sdk/runtime/tools/mcp_server_tool.py +4 -4
  102. alita_sdk/runtime/tools/planning/__init__.py +36 -0
  103. alita_sdk/runtime/tools/planning/models.py +246 -0
  104. alita_sdk/runtime/tools/planning/wrapper.py +607 -0
  105. alita_sdk/runtime/tools/router.py +2 -1
  106. alita_sdk/runtime/tools/sandbox.py +180 -79
  107. alita_sdk/runtime/tools/vectorstore.py +22 -21
  108. alita_sdk/runtime/tools/vectorstore_base.py +125 -52
  109. alita_sdk/runtime/utils/AlitaCallback.py +106 -20
  110. alita_sdk/runtime/utils/mcp_client.py +465 -0
  111. alita_sdk/runtime/utils/mcp_oauth.py +244 -0
  112. alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
  113. alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
  114. alita_sdk/runtime/utils/streamlit.py +40 -13
  115. alita_sdk/runtime/utils/toolkit_utils.py +28 -9
  116. alita_sdk/runtime/utils/utils.py +12 -0
  117. alita_sdk/tools/__init__.py +77 -33
  118. alita_sdk/tools/ado/repos/__init__.py +7 -6
  119. alita_sdk/tools/ado/repos/repos_wrapper.py +11 -11
  120. alita_sdk/tools/ado/test_plan/__init__.py +7 -7
  121. alita_sdk/tools/ado/wiki/__init__.py +7 -11
  122. alita_sdk/tools/ado/wiki/ado_wrapper.py +89 -15
  123. alita_sdk/tools/ado/work_item/__init__.py +7 -11
  124. alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
  125. alita_sdk/tools/advanced_jira_mining/__init__.py +8 -7
  126. alita_sdk/tools/aws/delta_lake/__init__.py +11 -9
  127. alita_sdk/tools/azure_ai/search/__init__.py +7 -6
  128. alita_sdk/tools/base_indexer_toolkit.py +345 -70
  129. alita_sdk/tools/bitbucket/__init__.py +9 -8
  130. alita_sdk/tools/bitbucket/api_wrapper.py +50 -6
  131. alita_sdk/tools/browser/__init__.py +4 -4
  132. alita_sdk/tools/carrier/__init__.py +4 -6
  133. alita_sdk/tools/chunkers/__init__.py +3 -1
  134. alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
  135. alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
  136. alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
  137. alita_sdk/tools/chunkers/universal_chunker.py +270 -0
  138. alita_sdk/tools/cloud/aws/__init__.py +7 -6
  139. alita_sdk/tools/cloud/azure/__init__.py +7 -6
  140. alita_sdk/tools/cloud/gcp/__init__.py +7 -6
  141. alita_sdk/tools/cloud/k8s/__init__.py +7 -6
  142. alita_sdk/tools/code/linter/__init__.py +7 -7
  143. alita_sdk/tools/code/loaders/codesearcher.py +3 -2
  144. alita_sdk/tools/code/sonar/__init__.py +8 -7
  145. alita_sdk/tools/code_indexer_toolkit.py +199 -0
  146. alita_sdk/tools/confluence/__init__.py +9 -8
  147. alita_sdk/tools/confluence/api_wrapper.py +171 -75
  148. alita_sdk/tools/confluence/loader.py +10 -0
  149. alita_sdk/tools/custom_open_api/__init__.py +9 -4
  150. alita_sdk/tools/elastic/__init__.py +8 -7
  151. alita_sdk/tools/elitea_base.py +492 -52
  152. alita_sdk/tools/figma/__init__.py +7 -7
  153. alita_sdk/tools/figma/api_wrapper.py +2 -1
  154. alita_sdk/tools/github/__init__.py +9 -9
  155. alita_sdk/tools/github/api_wrapper.py +9 -26
  156. alita_sdk/tools/github/github_client.py +62 -2
  157. alita_sdk/tools/gitlab/__init__.py +8 -8
  158. alita_sdk/tools/gitlab/api_wrapper.py +135 -33
  159. alita_sdk/tools/gitlab_org/__init__.py +7 -8
  160. alita_sdk/tools/google/bigquery/__init__.py +11 -12
  161. alita_sdk/tools/google_places/__init__.py +8 -7
  162. alita_sdk/tools/jira/__init__.py +9 -7
  163. alita_sdk/tools/jira/api_wrapper.py +100 -52
  164. alita_sdk/tools/keycloak/__init__.py +8 -7
  165. alita_sdk/tools/localgit/local_git.py +56 -54
  166. alita_sdk/tools/memory/__init__.py +1 -1
  167. alita_sdk/tools/non_code_indexer_toolkit.py +3 -2
  168. alita_sdk/tools/ocr/__init__.py +8 -7
  169. alita_sdk/tools/openapi/__init__.py +10 -1
  170. alita_sdk/tools/pandas/__init__.py +8 -7
  171. alita_sdk/tools/postman/__init__.py +7 -8
  172. alita_sdk/tools/postman/api_wrapper.py +19 -8
  173. alita_sdk/tools/postman/postman_analysis.py +8 -1
  174. alita_sdk/tools/pptx/__init__.py +8 -9
  175. alita_sdk/tools/qtest/__init__.py +16 -11
  176. alita_sdk/tools/qtest/api_wrapper.py +1784 -88
  177. alita_sdk/tools/rally/__init__.py +7 -8
  178. alita_sdk/tools/report_portal/__init__.py +9 -7
  179. alita_sdk/tools/salesforce/__init__.py +7 -7
  180. alita_sdk/tools/servicenow/__init__.py +10 -10
  181. alita_sdk/tools/sharepoint/__init__.py +7 -6
  182. alita_sdk/tools/sharepoint/api_wrapper.py +127 -36
  183. alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
  184. alita_sdk/tools/sharepoint/utils.py +8 -2
  185. alita_sdk/tools/slack/__init__.py +7 -6
  186. alita_sdk/tools/sql/__init__.py +8 -7
  187. alita_sdk/tools/sql/api_wrapper.py +71 -23
  188. alita_sdk/tools/testio/__init__.py +7 -6
  189. alita_sdk/tools/testrail/__init__.py +8 -9
  190. alita_sdk/tools/utils/__init__.py +26 -4
  191. alita_sdk/tools/utils/content_parser.py +88 -60
  192. alita_sdk/tools/utils/text_operations.py +254 -0
  193. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +76 -26
  194. alita_sdk/tools/xray/__init__.py +9 -7
  195. alita_sdk/tools/zephyr/__init__.py +7 -6
  196. alita_sdk/tools/zephyr_enterprise/__init__.py +8 -6
  197. alita_sdk/tools/zephyr_essential/__init__.py +7 -6
  198. alita_sdk/tools/zephyr_essential/api_wrapper.py +12 -13
  199. alita_sdk/tools/zephyr_scale/__init__.py +7 -6
  200. alita_sdk/tools/zephyr_squad/__init__.py +7 -6
  201. {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/METADATA +147 -2
  202. {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/RECORD +206 -130
  203. alita_sdk-0.3.499.dist-info/entry_points.txt +2 -0
  204. {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/WHEEL +0 -0
  205. {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/licenses/LICENSE +0 -0
  206. {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/top_level.txt +0 -0
@@ -12,6 +12,7 @@ from langchain_core.runnables import Runnable
12
12
  from langchain_core.runnables import RunnableConfig
13
13
  from langchain_core.tools import BaseTool, ToolException
14
14
  from langgraph.channels.ephemeral_value import EphemeralValue
15
+ from langgraph.errors import GraphRecursionError
15
16
  from langgraph.graph import StateGraph
16
17
  from langgraph.graph.graph import END, START
17
18
  from langgraph.graph.state import CompiledStateGraph
@@ -19,8 +20,9 @@ from langgraph.managed.base import is_managed_value
19
20
  from langgraph.prebuilt import InjectedStore
20
21
  from langgraph.store.base import BaseStore
21
22
 
23
+ from .constants import PRINTER_NODE_RS, PRINTER, PRINTER_COMPLETED_STATE
22
24
  from .mixedAgentRenderes import convert_message_to_json
23
- from .utils import create_state, propagate_the_input_mapping
25
+ from .utils import create_state, propagate_the_input_mapping, safe_format
24
26
  from ..tools.function import FunctionTool
25
27
  from ..tools.indexer_tool import IndexerNode
26
28
  from ..tools.llm import LLMNode
@@ -28,7 +30,7 @@ from ..tools.loop import LoopNode
28
30
  from ..tools.loop_output import LoopToolNode
29
31
  from ..tools.tool import ToolNode
30
32
  from ..utils.evaluate import EvaluateTemplate
31
- from ..utils.utils import clean_string, TOOLKIT_SPLITTER
33
+ from ..utils.utils import clean_string
32
34
  from ..tools.router import RouterNode
33
35
 
34
36
  logger = logging.getLogger(__name__)
@@ -170,12 +172,13 @@ Answer only with step name, no need to add descrip in case none of the steps are
170
172
  """
171
173
 
172
174
  def __init__(self, client, steps: str, description: str = "", decisional_inputs: Optional[list[str]] = [],
173
- default_output: str = 'END'):
175
+ default_output: str = 'END', is_node: bool = False):
174
176
  self.client = client
175
177
  self.steps = ",".join([clean_string(step) for step in steps])
176
178
  self.description = description
177
179
  self.decisional_inputs = decisional_inputs
178
180
  self.default_output = default_output if default_output != 'END' else END
181
+ self.is_node = is_node
179
182
 
180
183
  def invoke(self, state: Annotated[BaseStore, InjectedStore()], config: Optional[RunnableConfig] = None) -> str:
181
184
  additional_info = ""
@@ -197,7 +200,8 @@ Answer only with step name, no need to add descrip in case none of the steps are
197
200
  dispatch_custom_event(
198
201
  "on_decision_edge", {"decisional_inputs": self.decisional_inputs, "state": state}, config=config
199
202
  )
200
- return result
203
+ # support of legacy `decision` as part of node
204
+ return {"router_output": result} if self.is_node else result
201
205
 
202
206
 
203
207
  class TransitionalEdge(Runnable):
@@ -232,6 +236,35 @@ class StateDefaultNode(Runnable):
232
236
  result[key] = temp_value
233
237
  return result
234
238
 
239
+ class PrinterNode(Runnable):
240
+ name = "PrinterNode"
241
+
242
+ def __init__(self, input_mapping: Optional[dict[str, dict]]):
243
+ self.input_mapping = input_mapping
244
+
245
+ def invoke(self, state: BaseStore, config: Optional[RunnableConfig] = None) -> dict:
246
+ logger.info(f"Printer Node - Current state variables: {state}")
247
+ result = {}
248
+ logger.debug(f"Initial text pattern: {self.input_mapping}")
249
+ mapping = propagate_the_input_mapping(self.input_mapping, [], state)
250
+ # for printer node we expect that all the lists will be joined into strings already
251
+ # Join any lists that haven't been converted yet
252
+ for key, value in mapping.items():
253
+ if isinstance(value, list):
254
+ mapping[key] = ', '.join(str(item) for item in value)
255
+ if mapping.get(PRINTER) is None:
256
+ raise ToolException(f"PrinterNode requires '{PRINTER}' field in input mapping")
257
+ formatted_output = mapping[PRINTER]
258
+ # add info label to the printer's output
259
+ if not formatted_output == PRINTER_COMPLETED_STATE:
260
+ # convert formatted output to string if it's not
261
+ if not isinstance(formatted_output, str):
262
+ formatted_output = str(formatted_output)
263
+ formatted_output += f"\n\n-----\n*How to proceed?*\n* *to resume the pipeline - type anything...*"
264
+ logger.debug(f"Formatted output: {formatted_output}")
265
+ result[PRINTER_NODE_RS] = formatted_output
266
+ return result
267
+
235
268
 
236
269
  class StateModifierNode(Runnable):
237
270
  name = "StateModifierNode"
@@ -348,8 +381,8 @@ class StateModifierNode(Runnable):
348
381
  return result
349
382
 
350
383
 
351
-
352
- def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None, state_class=None, output_variables=None):
384
+ def prepare_output_schema(lg_builder, memory, store, debug=False, interrupt_before=None, interrupt_after=None,
385
+ state_class=None, output_variables=None):
353
386
  # prepare output channels
354
387
  if interrupt_after is None:
355
388
  interrupt_after = []
@@ -450,15 +483,18 @@ def create_graph(
450
483
  node_id = clean_string(node['id'])
451
484
  toolkit_name = node.get('toolkit_name')
452
485
  tool_name = clean_string(node.get('tool', node_id))
453
- if toolkit_name:
454
- tool_name = f"{clean_string(toolkit_name)}{TOOLKIT_SPLITTER}{tool_name}"
486
+ # Tool names are now clean (no prefix needed)
455
487
  logger.info(f"Node: {node_id} : {node_type} - {tool_name}")
456
- if node_type in ['function', 'tool', 'loop', 'loop_from_tool', 'indexer', 'subgraph', 'pipeline', 'agent']:
488
+ if node_type in ['function', 'toolkit', 'mcp', 'tool', 'loop', 'loop_from_tool', 'indexer', 'subgraph', 'pipeline', 'agent']:
489
+ if node_type == 'mcp' and tool_name not in [tool.name for tool in tools]:
490
+ # MCP is not connected and node cannot be added
491
+ raise ToolException(f"MCP tool '{tool_name}' not found in the provided tools. "
492
+ f"Make sure it is connected properly. Available tools: {[tool.name for tool in tools]}")
457
493
  for tool in tools:
458
494
  if tool.name == tool_name:
459
- if node_type == 'function':
495
+ if node_type in ['function', 'toolkit', 'mcp']:
460
496
  lg_builder.add_node(node_id, FunctionTool(
461
- tool=tool, name=node['id'], return_type='dict',
497
+ tool=tool, name=node_id, return_type='dict',
462
498
  output_variables=node.get('output', []),
463
499
  input_mapping=node.get('input_mapping',
464
500
  {'messages': {'type': 'variable', 'value': 'messages'}}),
@@ -466,11 +502,12 @@ def create_graph(
466
502
  elif node_type == 'agent':
467
503
  input_params = node.get('input', ['messages'])
468
504
  input_mapping = node.get('input_mapping',
469
- {'messages': {'type': 'variable', 'value': 'messages'}})
505
+ {'messages': {'type': 'variable', 'value': 'messages'}})
506
+ output_vars = node.get('output', [])
470
507
  lg_builder.add_node(node_id, FunctionTool(
471
508
  client=client, tool=tool,
472
- name=node['id'], return_type='str',
473
- output_variables=node.get('output', []),
509
+ name=node_id, return_type='str',
510
+ output_variables=output_vars + ['messages'] if 'messages' not in output_vars else output_vars,
474
511
  input_variables=input_params,
475
512
  input_mapping= input_mapping
476
513
  ))
@@ -481,7 +518,8 @@ def create_graph(
481
518
  # wrap with mappings
482
519
  pipeline_name = node.get('tool', None)
483
520
  if not pipeline_name:
484
- raise ValueError("Subgraph must have a 'tool' node: add required tool to the subgraph node")
521
+ raise ValueError(
522
+ "Subgraph must have a 'tool' node: add required tool to the subgraph node")
485
523
  node_fn = SubgraphRunnable(
486
524
  inner=tool.graph,
487
525
  name=pipeline_name,
@@ -493,25 +531,16 @@ def create_graph(
493
531
  elif node_type == 'tool':
494
532
  lg_builder.add_node(node_id, ToolNode(
495
533
  client=client, tool=tool,
496
- name=node['id'], return_type='dict',
534
+ name=node_id, return_type='dict',
497
535
  output_variables=node.get('output', []),
498
536
  input_variables=node.get('input', ['messages']),
499
537
  structured_output=node.get('structured_output', False),
500
538
  task=node.get('task')
501
539
  ))
502
- # TODO: decide on struct output for agent nodes
503
- # elif node_type == 'agent':
504
- # lg_builder.add_node(node_id, AgentNode(
505
- # client=client, tool=tool,
506
- # name=node['id'], return_type='dict',
507
- # output_variables=node.get('output', []),
508
- # input_variables=node.get('input', ['messages']),
509
- # task=node.get('task')
510
- # ))
511
540
  elif node_type == 'loop':
512
541
  lg_builder.add_node(node_id, LoopNode(
513
542
  client=client, tool=tool,
514
- name=node['id'], return_type='dict',
543
+ name=node_id, return_type='dict',
515
544
  output_variables=node.get('output', []),
516
545
  input_variables=node.get('input', ['messages']),
517
546
  task=node.get('task', '')
@@ -520,13 +549,14 @@ def create_graph(
520
549
  loop_toolkit_name = node.get('loop_toolkit_name')
521
550
  loop_tool_name = node.get('loop_tool')
522
551
  if (loop_toolkit_name and loop_tool_name) or loop_tool_name:
523
- loop_tool_name = f"{clean_string(loop_toolkit_name)}{TOOLKIT_SPLITTER}{loop_tool_name}" if loop_toolkit_name else clean_string(loop_tool_name)
552
+ # Use clean tool name (no prefix)
553
+ loop_tool_name = clean_string(loop_tool_name)
524
554
  for t in tools:
525
555
  if t.name == loop_tool_name:
526
556
  logger.debug(f"Loop tool discovered: {t}")
527
557
  lg_builder.add_node(node_id, LoopToolNode(
528
558
  client=client,
529
- name=node['id'], return_type='dict',
559
+ name=node_id, return_type='dict',
530
560
  tool=tool, loop_tool=t,
531
561
  variables_mapping=node.get('variables_mapping', {}),
532
562
  output_variables=node.get('output', []),
@@ -546,13 +576,26 @@ def create_graph(
546
576
  client=client, tool=tool,
547
577
  index_tool=indexer_tool,
548
578
  input_mapping=node.get('input_mapping', {}),
549
- name=node['id'], return_type='dict',
579
+ name=node_id, return_type='dict',
550
580
  chunking_tool=node.get('chunking_tool', None),
551
581
  chunking_config=node.get('chunking_config', {}),
552
582
  output_variables=node.get('output', []),
553
583
  input_variables=node.get('input', ['messages']),
554
584
  structured_output=node.get('structured_output', False)))
555
585
  break
586
+ elif node_type == 'code':
587
+ from ..tools.sandbox import create_sandbox_tool
588
+ sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True,
589
+ alita_client=kwargs.get('alita_client', None))
590
+ code_data = node.get('code', {'type': 'fixed', 'value': "return 'Code block is empty'"})
591
+ lg_builder.add_node(node_id, FunctionTool(
592
+ tool=sandbox_tool, name=node['id'], return_type='dict',
593
+ output_variables=node.get('output', []),
594
+ input_mapping={'code': code_data},
595
+ input_variables=node.get('input', ['messages']),
596
+ structured_output=node.get('structured_output', False),
597
+ alita_client=kwargs.get('alita_client', None)
598
+ ))
556
599
  elif node_type == 'llm':
557
600
  output_vars = node.get('output', [])
558
601
  output_vars_dict = {
@@ -565,10 +608,10 @@ def create_graph(
565
608
  tool_names = []
566
609
  if isinstance(connected_tools, dict):
567
610
  for toolkit, selected_tools in connected_tools.items():
568
- for tool in selected_tools:
569
- tool_names.append(f"{toolkit}{TOOLKIT_SPLITTER}{tool}")
611
+ # Add tool names directly (no prefix)
612
+ tool_names.extend(selected_tools)
570
613
  elif isinstance(connected_tools, list):
571
- # for cases when tools are provided as a list of names with already bound toolkit_name
614
+ # Use provided tool names as-is
572
615
  tool_names = connected_tools
573
616
 
574
617
  if tool_names:
@@ -581,27 +624,41 @@ def create_graph(
581
624
  else:
582
625
  # Use all available tools
583
626
  available_tools = [tool for tool in tools if isinstance(tool, BaseTool)]
584
-
627
+
585
628
  lg_builder.add_node(node_id, LLMNode(
586
629
  client=client,
587
630
  input_mapping=node.get('input_mapping', {'messages': {'type': 'variable', 'value': 'messages'}}),
588
- name=node['id'],
631
+ name=node_id,
589
632
  return_type='dict',
590
633
  structured_output_dict=output_vars_dict,
591
634
  output_variables=output_vars,
592
635
  input_variables=node.get('input', ['messages']),
593
636
  structured_output=node.get('structured_output', False),
637
+ tool_execution_timeout=node.get('tool_execution_timeout', 900),
594
638
  available_tools=available_tools,
595
- tool_names=tool_names))
596
- elif node_type == 'router':
597
- # Add a RouterNode as an independent node
598
- lg_builder.add_node(node_id, RouterNode(
599
- name=node['id'],
600
- condition=node.get('condition', ''),
601
- routes=node.get('routes', []),
602
- default_output=node.get('default_output', 'END'),
603
- input_variables=node.get('input', ['messages'])
639
+ tool_names=tool_names,
640
+ steps_limit=kwargs.get('steps_limit', 25)
604
641
  ))
642
+ elif node_type in ['router', 'decision']:
643
+ if node_type == 'router':
644
+ # Add a RouterNode as an independent node
645
+ lg_builder.add_node(node_id, RouterNode(
646
+ name=node_id,
647
+ condition=node.get('condition', ''),
648
+ routes=node.get('routes', []),
649
+ default_output=node.get('default_output', 'END'),
650
+ input_variables=node.get('input', ['messages'])
651
+ ))
652
+ elif node_type == 'decision':
653
+ logger.info(f'Adding decision: {node["nodes"]}')
654
+ lg_builder.add_node(node_id, DecisionEdge(
655
+ client, node['nodes'],
656
+ node.get('description', ""),
657
+ decisional_inputs=node.get('decisional_inputs', ['messages']),
658
+ default_output=node.get('default_output', 'END'),
659
+ is_node=True
660
+ ))
661
+
605
662
  # Add a single conditional edge for all routes
606
663
  lg_builder.add_conditional_edges(
607
664
  node_id,
@@ -612,6 +669,7 @@ def create_graph(
612
669
  default_output=node.get('default_output', 'END')
613
670
  )
614
671
  )
672
+ continue
615
673
  elif node_type == 'state_modifier':
616
674
  lg_builder.add_node(node_id, StateModifierNode(
617
675
  template=node.get('template', ''),
@@ -619,6 +677,22 @@ def create_graph(
619
677
  input_variables=node.get('input', ['messages']),
620
678
  output_variables=node.get('output', [])
621
679
  ))
680
+ elif node_type == 'printer':
681
+ lg_builder.add_node(node_id, PrinterNode(
682
+ input_mapping=node.get('input_mapping', {'printer': {'type': 'fixed', 'value': ''}}),
683
+ ))
684
+
685
+ # add interrupts after printer node if specified
686
+ interrupt_after.append(clean_string(node_id))
687
+
688
+ # reset printer output variable to avoid carrying over
689
+ reset_node_id = f"{node_id}_reset"
690
+ lg_builder.add_node(reset_node_id, PrinterNode(
691
+ input_mapping={'printer': {'type': 'fixed', 'value': PRINTER_COMPLETED_STATE}}
692
+ ))
693
+ lg_builder.add_conditional_edges(node_id, TransitionalEdge(reset_node_id))
694
+ lg_builder.add_conditional_edges(reset_node_id, TransitionalEdge(clean_string(node['transition'])))
695
+ continue
622
696
  if node.get('transition'):
623
697
  next_step = clean_string(node['transition'])
624
698
  logger.info(f'Adding transition: {next_step}')
@@ -695,11 +769,15 @@ def set_defaults(d):
695
769
  type_defaults = {
696
770
  'str': '',
697
771
  'list': [],
772
+ 'dict': {},
698
773
  'int': 0,
699
774
  'float': 0.0,
700
775
  'bool': False,
701
776
  # add more types as needed
702
777
  }
778
+ # Build state_types mapping with STRING type names (not actual type objects)
779
+ state_types = {}
780
+
703
781
  for k, v in d.items():
704
782
  # Skip 'input' key as it is not a state initial variable
705
783
  if k == 'input':
@@ -707,6 +785,16 @@ def set_defaults(d):
707
785
  # set value or default if type is defined
708
786
  if 'value' not in v:
709
787
  v['value'] = type_defaults.get(v['type'], None)
788
+
789
+ # Also build the state_types mapping with STRING type names
790
+ var_type = v['type'] if isinstance(v, dict) else v
791
+ if var_type in ['str', 'int', 'float', 'bool', 'list', 'dict', 'number']:
792
+ # Store the string type name, not the actual type object
793
+ state_types[k] = var_type if var_type != 'number' else 'int'
794
+
795
+ # Add state_types as a default value that will be set at initialization
796
+ # Use string type names to avoid serialization issues
797
+ d['state_types'] = {'type': 'dict', 'value': state_types}
710
798
  return d
711
799
 
712
800
  def convert_dict_to_message(msg_dict):
@@ -740,60 +828,208 @@ class LangGraphAgentRunnable(CompiledStateGraph):
740
828
  def invoke(self, input: Union[dict[str, Any], Any],
741
829
  config: Optional[RunnableConfig] = None,
742
830
  *args, **kwargs):
743
- logger.info(f"Incomming Input: {input}")
831
+ logger.info(f"Incoming Input: {input}")
744
832
  if config is None:
745
833
  config = RunnableConfig()
746
834
  if not config.get("configurable", {}).get("thread_id", ""):
747
835
  config["configurable"] = {"thread_id": str(uuid4())}
748
836
  thread_id = config.get("configurable", {}).get("thread_id")
837
+
838
+ # Check if checkpoint exists early for chat_history handling
839
+ checkpoint_exists = self.checkpointer and self.checkpointer.get_tuple(config)
840
+
749
841
  # Handle chat history and current input properly
750
842
  if input.get('chat_history') and not input.get('messages'):
751
- # Convert chat history dict messages to LangChain message objects
752
- chat_history = input.pop('chat_history')
753
- input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
754
-
843
+ if checkpoint_exists:
844
+ # Checkpoint already has conversation history - discard redundant chat_history
845
+ input.pop('chat_history', None)
846
+ else:
847
+ # No checkpoint - convert chat history dict messages to LangChain message objects
848
+ chat_history = input.pop('chat_history')
849
+ input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
850
+
851
+ # handler for LLM node: if there is no input (chat perspective), take the last human message
852
+ # Track if input came from messages to handle content extraction properly
853
+ input_from_messages = False
854
+ if not input.get('input'):
855
+ if input.get('messages'):
856
+ input['input'] = [next((msg for msg in reversed(input['messages']) if isinstance(msg, HumanMessage)),
857
+ None)]
858
+ if input['input'] is not None:
859
+ input_from_messages = True
860
+
755
861
  # Append current input to existing messages instead of overwriting
756
862
  if input.get('input'):
757
- current_message = input.get('input')[-1]
863
+ if isinstance(input['input'], str):
864
+ current_message = input['input']
865
+ else:
866
+ # input can be a list of messages or a single message object
867
+ current_message = input.get('input')[-1]
868
+
758
869
  # TODO: add handler after we add 2+ inputs (filterByType, etc.)
759
- input['input'] = current_message if isinstance(current_message, str) else str(current_message)
870
+ if isinstance(current_message, HumanMessage):
871
+ current_content = current_message.content
872
+ if isinstance(current_content, list):
873
+ # Extract text parts and keep non-text parts (images, etc.)
874
+ text_contents = []
875
+ non_text_parts = []
876
+
877
+ for item in current_content:
878
+ if isinstance(item, dict) and item.get('type') == 'text':
879
+ text_contents.append(item['text'])
880
+ elif isinstance(item, str):
881
+ text_contents.append(item)
882
+ else:
883
+ # Keep image_url and other non-text content
884
+ non_text_parts.append(item)
885
+
886
+ # Set input to the joined text
887
+ input['input'] = ". ".join(text_contents) if text_contents else ""
888
+
889
+ # If this message came from input['messages'], update or remove it
890
+ if input_from_messages:
891
+ if non_text_parts:
892
+ # Keep the message but only with non-text content (images, etc.)
893
+ current_message.content = non_text_parts
894
+ else:
895
+ # All content was text, remove this message from the list
896
+ input['messages'] = [msg for msg in input['messages'] if msg is not current_message]
897
+ else:
898
+ # Message came from input['input'], not from input['messages']
899
+ # If there are non-text parts (images, etc.), preserve them in messages
900
+ if non_text_parts:
901
+ # Initialize messages if it doesn't exist or is empty
902
+ if not input.get('messages'):
903
+ input['messages'] = []
904
+ # Create a new message with only non-text content
905
+ non_text_message = HumanMessage(content=non_text_parts)
906
+ input['messages'].append(non_text_message)
907
+
908
+ elif isinstance(current_content, str):
909
+ # handles the regenerate case: content is already a plain string
910
+ input['input'] = current_content
911
+ # If from messages and all content is text, remove the message
912
+ if input_from_messages:
913
+ input['messages'] = [msg for msg in input['messages'] if msg is not current_message]
914
+ else:
915
+ input['input'] = str(current_content)
916
+ # If from messages, remove since we extracted the content
917
+ if input_from_messages:
918
+ input['messages'] = [msg for msg in input['messages'] if msg is not current_message]
919
+ elif isinstance(current_message, str):
920
+ input['input'] = current_message
921
+ else:
922
+ input['input'] = str(current_message)
760
923
  if input.get('messages'):
761
924
  # Ensure existing messages are LangChain objects
762
925
  input['messages'] = [convert_dict_to_message(msg) for msg in input['messages']]
763
926
  # Append to existing messages
764
- input['messages'].append(current_message)
765
- else:
766
- # No existing messages, create new list
767
- input['messages'] = [current_message]
927
+ # input['messages'].append(current_message)
928
+ # else:
929
+ # NOTE: Commented out to prevent duplicates with input['input']
930
+ # input['messages'] = [current_message]
931
+
932
+ # Validate that input is not empty after all processing
933
+ if not input.get('input'):
934
+ raise RuntimeError(
935
+ "Empty input after processing. Cannot send empty string to LLM. "
936
+ "This likely means the message contained only non-text content "
937
+ "with no accompanying text."
938
+ )
939
+
768
940
  logging.info(f"Input: {thread_id} - {input}")
769
- if self.checkpointer and self.checkpointer.get_tuple(config):
770
- self.update_state(config, input)
771
- result = super().invoke(None, config=config, *args, **kwargs)
772
- else:
773
- result = super().invoke(input, config=config, *args, **kwargs)
774
941
  try:
775
- if self.output_variables and self.output_variables[0] != "messages":
776
- # If output_variables are specified, use the value of first one or use the last messages as default
777
- output = result.get(self.output_variables[0], result['messages'][-1].content)
942
+ if self.checkpointer and self.checkpointer.get_tuple(config):
943
+ if config.pop("should_continue", False):
944
+ invoke_input = input
945
+ else:
946
+ self.update_state(config, input)
947
+ invoke_input = None
948
+ result = super().invoke(invoke_input, config=config, *args, **kwargs)
949
+ else:
950
+ result = super().invoke(input, config=config, *args, **kwargs)
951
+ except GraphRecursionError as e:
952
+ current_recursion_limit = config.get("recursion_limit", 0)
953
+ logger.warning("ToolExecutionLimitReached caught in LangGraphAgentRunnable: %s", e)
954
+ return self._handle_graph_recursion_error(
955
+ config=config,
956
+ thread_id=thread_id,
957
+ current_recursion_limit=current_recursion_limit,
958
+ )
959
+
960
+ try:
961
+ # Check if printer node output exists
962
+ printer_output = result.get(PRINTER_NODE_RS)
963
+ if printer_output == PRINTER_COMPLETED_STATE:
964
+ # Printer completed, extract last AI message
965
+ messages = result['messages']
966
+ output = next(
967
+ (msg.content for msg in reversed(messages)
968
+ if not isinstance(msg, HumanMessage)),
969
+ messages[-1].content
970
+ )
971
+ elif printer_output is not None:
972
+ # Printer node has output (interrupted state)
973
+ output = printer_output
778
974
  else:
779
- output = result['messages'][-1].content
780
- except:
781
- output = list(result.values())[-1]
782
- thread_id = None
975
+ # No printer node, extract last AI message from messages
976
+ messages = result.get('messages', [])
977
+ output = next(
978
+ (msg.content for msg in reversed(messages)
979
+ if not isinstance(msg, HumanMessage)),
980
+ None
981
+ )
982
+ except Exception:
983
+ # Fallback: try to get last value or last message
984
+ output = list(result.values())[-1] if result else None
783
985
  config_state = self.get_state(config)
784
- if config_state.next:
785
- thread_id = config['configurable']['thread_id']
986
+ is_execution_finished = not config_state.next
987
+ if is_execution_finished:
988
+ thread_id = None
989
+
990
+ final_output = f"Assistant run has been completed, but output is None.\nAdding last message if any: {messages[-1] if messages else []}" if is_execution_finished and output is None else output
786
991
 
787
992
  result_with_state = {
788
- "output": output,
993
+ "output": final_output,
789
994
  "thread_id": thread_id,
790
- "execution_finished": not config_state.next
995
+ "execution_finished": is_execution_finished
791
996
  }
792
997
 
793
998
  # Include all state values in the result
794
999
  if hasattr(config_state, 'values') and config_state.values:
1000
+ # except for key 'output', which is already included
1001
+ for key, value in config_state.values.items():
1002
+ if key != 'output':
1003
+ result_with_state[key] = value
1004
+
1005
+ return result_with_state
1006
+
1007
+ def _handle_graph_recursion_error(
1008
+ self,
1009
+ config: RunnableConfig,
1010
+ thread_id: str,
1011
+ current_recursion_limit: int,
1012
+ ) -> dict:
1013
+ """Handle GraphRecursionError by returning a soft-boundary response."""
1014
+ config_state = self.get_state(config)
1015
+ is_execution_finished = False
1016
+
1017
+ friendly_output = (
1018
+ f"Tool step limit {current_recursion_limit} reached for this run. You can continue by sending another "
1019
+ "message or refining your request."
1020
+ )
1021
+
1022
+ result_with_state: dict[str, Any] = {
1023
+ "output": friendly_output,
1024
+ "thread_id": thread_id,
1025
+ "execution_finished": is_execution_finished,
1026
+ "tool_execution_limit_reached": True,
1027
+ }
1028
+
1029
+ if hasattr(config_state, "values") and config_state.values:
795
1030
  for key, value in config_state.values.items():
796
- result_with_state[key] = value
1031
+ if key != "output":
1032
+ result_with_state[key] = value
797
1033
 
798
1034
  return result_with_state
799
1035