alita-sdk 0.3.465__py3-none-any.whl → 0.3.486__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of alita-sdk might be problematic.

Files changed (90)
  1. alita_sdk/cli/agent/__init__.py +5 -0
  2. alita_sdk/cli/agent/default.py +83 -1
  3. alita_sdk/cli/agent_loader.py +6 -9
  4. alita_sdk/cli/agent_ui.py +13 -3
  5. alita_sdk/cli/agents.py +1866 -185
  6. alita_sdk/cli/callbacks.py +96 -25
  7. alita_sdk/cli/cli.py +10 -1
  8. alita_sdk/cli/config.py +151 -9
  9. alita_sdk/cli/context/__init__.py +30 -0
  10. alita_sdk/cli/context/cleanup.py +198 -0
  11. alita_sdk/cli/context/manager.py +731 -0
  12. alita_sdk/cli/context/message.py +285 -0
  13. alita_sdk/cli/context/strategies.py +289 -0
  14. alita_sdk/cli/context/token_estimation.py +127 -0
  15. alita_sdk/cli/input_handler.py +167 -4
  16. alita_sdk/cli/inventory.py +1256 -0
  17. alita_sdk/cli/toolkit.py +14 -17
  18. alita_sdk/cli/toolkit_loader.py +35 -5
  19. alita_sdk/cli/tools/__init__.py +8 -1
  20. alita_sdk/cli/tools/filesystem.py +815 -55
  21. alita_sdk/cli/tools/planning.py +143 -157
  22. alita_sdk/cli/tools/terminal.py +154 -20
  23. alita_sdk/community/__init__.py +64 -8
  24. alita_sdk/community/inventory/__init__.py +224 -0
  25. alita_sdk/community/inventory/config.py +257 -0
  26. alita_sdk/community/inventory/enrichment.py +2137 -0
  27. alita_sdk/community/inventory/extractors.py +1469 -0
  28. alita_sdk/community/inventory/ingestion.py +3172 -0
  29. alita_sdk/community/inventory/knowledge_graph.py +1457 -0
  30. alita_sdk/community/inventory/parsers/__init__.py +218 -0
  31. alita_sdk/community/inventory/parsers/base.py +295 -0
  32. alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
  33. alita_sdk/community/inventory/parsers/go_parser.py +851 -0
  34. alita_sdk/community/inventory/parsers/html_parser.py +389 -0
  35. alita_sdk/community/inventory/parsers/java_parser.py +593 -0
  36. alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
  37. alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
  38. alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
  39. alita_sdk/community/inventory/parsers/python_parser.py +604 -0
  40. alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
  41. alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
  42. alita_sdk/community/inventory/parsers/text_parser.py +322 -0
  43. alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
  44. alita_sdk/community/inventory/patterns/__init__.py +61 -0
  45. alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
  46. alita_sdk/community/inventory/patterns/loader.py +348 -0
  47. alita_sdk/community/inventory/patterns/registry.py +198 -0
  48. alita_sdk/community/inventory/presets.py +535 -0
  49. alita_sdk/community/inventory/retrieval.py +1403 -0
  50. alita_sdk/community/inventory/toolkit.py +169 -0
  51. alita_sdk/community/inventory/visualize.py +1370 -0
  52. alita_sdk/configurations/bitbucket.py +0 -3
  53. alita_sdk/runtime/clients/client.py +84 -26
  54. alita_sdk/runtime/langchain/assistant.py +4 -2
  55. alita_sdk/runtime/langchain/langraph_agent.py +122 -31
  56. alita_sdk/runtime/llms/preloaded.py +2 -6
  57. alita_sdk/runtime/toolkits/__init__.py +2 -0
  58. alita_sdk/runtime/toolkits/application.py +1 -1
  59. alita_sdk/runtime/toolkits/mcp.py +46 -36
  60. alita_sdk/runtime/toolkits/planning.py +171 -0
  61. alita_sdk/runtime/toolkits/tools.py +39 -6
  62. alita_sdk/runtime/tools/llm.py +185 -8
  63. alita_sdk/runtime/tools/planning/__init__.py +36 -0
  64. alita_sdk/runtime/tools/planning/models.py +246 -0
  65. alita_sdk/runtime/tools/planning/wrapper.py +607 -0
  66. alita_sdk/runtime/tools/vectorstore_base.py +41 -6
  67. alita_sdk/runtime/utils/mcp_oauth.py +80 -0
  68. alita_sdk/runtime/utils/streamlit.py +6 -10
  69. alita_sdk/runtime/utils/toolkit_utils.py +19 -4
  70. alita_sdk/tools/__init__.py +54 -27
  71. alita_sdk/tools/ado/repos/repos_wrapper.py +1 -2
  72. alita_sdk/tools/base_indexer_toolkit.py +98 -19
  73. alita_sdk/tools/bitbucket/__init__.py +2 -2
  74. alita_sdk/tools/chunkers/__init__.py +3 -1
  75. alita_sdk/tools/chunkers/sematic/markdown_chunker.py +95 -6
  76. alita_sdk/tools/chunkers/universal_chunker.py +269 -0
  77. alita_sdk/tools/code_indexer_toolkit.py +55 -22
  78. alita_sdk/tools/elitea_base.py +86 -21
  79. alita_sdk/tools/jira/__init__.py +1 -1
  80. alita_sdk/tools/jira/api_wrapper.py +91 -40
  81. alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
  82. alita_sdk/tools/qtest/__init__.py +1 -1
  83. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +8 -2
  84. alita_sdk/tools/zephyr_essential/api_wrapper.py +12 -13
  85. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.486.dist-info}/METADATA +2 -1
  86. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.486.dist-info}/RECORD +90 -50
  87. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.486.dist-info}/WHEEL +0 -0
  88. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.486.dist-info}/entry_points.txt +0 -0
  89. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.486.dist-info}/licenses/LICENSE +0 -0
  90. {alita_sdk-0.3.465.dist-info → alita_sdk-0.3.486.dist-info}/top_level.txt +0 -0
alita_sdk/configurations/bitbucket.py

@@ -1,6 +1,3 @@
-from typing import Optional
-
-from atlassian import Bitbucket
 from pydantic import BaseModel, ConfigDict, Field, SecretStr
 
 
alita_sdk/runtime/clients/client.py

@@ -22,6 +22,7 @@ from .artifact import Artifact
 from ..langchain.chat_message_template import Jinja2TemplatedChatMessagesTemplate
 from ..utils.utils import TOOLKIT_SPLITTER
 from ...tools import get_available_toolkit_models
+from ...tools.base_indexer_toolkit import IndexTools
 
 logger = logging.getLogger(__name__)
 
@@ -178,7 +179,7 @@ class AlitaClient:
 
     def get_available_models(self):
         """Get list of available models from the configurations API.
-
+
         Returns:
             List of model dictionaries with 'name' and other properties,
             or empty list if request fails.
@@ -221,18 +222,45 @@ class AlitaClient:
 
         logger.info(f"Creating ChatOpenAI model: {model_name} with config: {model_config}")
 
-        return ChatOpenAI(
-            base_url=f"{self.base_url}{self.llm_path}",
-            model=model_name,
-            api_key=self.auth_token,
-            streaming=model_config.get("streaming", True),
-            stream_usage=model_config.get("stream_usage", True),
-            max_tokens=model_config.get("max_tokens", None),
-            temperature=model_config.get("temperature"),
-            max_retries=model_config.get("max_retries", 3),
-            seed=model_config.get("seed", None),
-            openai_organization=str(self.project_id),
-        )
+        try:
+            from tools import this  # pylint: disable=E0401,C0415
+            worker_config = this.for_module("indexer_worker").descriptor.config
+        except:  # pylint: disable=W0702
+            worker_config = {}
+
+        use_responses_api = False
+
+        if worker_config and isinstance(worker_config, dict):
+            for target_name_tag in worker_config.get("use_responses_api_for", []):
+                if target_name_tag in model_name:
+                    use_responses_api = True
+                    break
+
+        # handle the case when max_tokens is auto-configurable (== -1)
+        llm_max_tokens = model_config.get("max_tokens", None)
+        if llm_max_tokens and llm_max_tokens == -1:
+            logger.warning('User selected `MAX COMPLETION TOKENS` as `auto`')
+            # default number for the case when auto is selected for an agent
+            llm_max_tokens = 4000
+
+        target_kwargs = {
+            "base_url": f"{self.base_url}{self.llm_path}",
+            "model": model_name,
+            "api_key": self.auth_token,
+            "streaming": model_config.get("streaming", True),
+            "stream_usage": model_config.get("stream_usage", True),
+            "max_tokens": llm_max_tokens,
+            "temperature": model_config.get("temperature"),
+            "reasoning_effort": model_config.get("reasoning_effort"),
+            "max_retries": model_config.get("max_retries", 3),
+            "seed": model_config.get("seed", None),
+            "openai_organization": str(self.project_id),
+        }
+
+        if use_responses_api:
+            target_kwargs["use_responses_api"] = True
+
+        return ChatOpenAI(**target_kwargs)
 
     def generate_image(self,
                        prompt: str,
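The reworked `get_llm` above does two new things before constructing `ChatOpenAI`: it normalizes the platform's `-1` ("auto") max-tokens sentinel to a fixed 4000-token budget, and it opts a model into the Responses API when its name matches a tag from the indexer worker config. A minimal sketch of that selection logic; `build_llm_kwargs` and the example tags are illustrative assumptions, not SDK API:

    # Sketch only: build_llm_kwargs and the dict shapes are assumptions
    # drawn from the diff above, not part of the SDK's public API.
    def build_llm_kwargs(model_name: str, model_config: dict, worker_config: dict) -> dict:
        max_tokens = model_config.get("max_tokens")
        if max_tokens == -1:   # the platform sends -1 when "auto" is selected
            max_tokens = 4000  # fall back to a fixed completion budget
        kwargs = {
            "model": model_name,
            "max_tokens": max_tokens,
            "temperature": model_config.get("temperature"),
            "reasoning_effort": model_config.get("reasoning_effort"),
        }
        # opt into the Responses API when the model name contains a configured tag,
        # e.g. worker_config = {"use_responses_api_for": ["gpt-5", "o3"]} (hypothetical values)
        tags = worker_config.get("use_responses_api_for", []) if isinstance(worker_config, dict) else []
        if any(tag in model_name for tag in tags):
            kwargs["use_responses_api"] = True
        return kwargs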
@@ -318,7 +346,8 @@ class AlitaClient:
                     app_type=None, memory=None, runtime='langchain',
                     application_variables: Optional[dict] = None,
                     version_details: Optional[dict] = None, store: Optional[BaseStore] = None,
-                    llm: Optional[ChatOpenAI] = None, mcp_tokens: Optional[dict] = None):
+                    llm: Optional[ChatOpenAI] = None, mcp_tokens: Optional[dict] = None,
+                    conversation_id: Optional[str] = None):
         if tools is None:
             tools = []
         if chat_history is None:
@@ -338,11 +367,15 @@ class AlitaClient:
             if var['name'] in application_variables:
                 var.update(application_variables[var['name']])
         if llm is None:
+            max_tokens = data['llm_settings'].get('max_tokens', 4000)
+            if max_tokens == -1:
+                # default number for the case when auto is selected for an agent
+                max_tokens = 4000
             llm = self.get_llm(
                 model_name=data['llm_settings']['model_name'],
                 model_config={
-                    "max_tokens": data['llm_settings']['max_tokens'],
-                    "top_p": data['llm_settings']['top_p'],
+                    "max_tokens": max_tokens,
+                    "reasoning_effort": data['llm_settings'].get('reasoning_effort'),
                     "temperature": data['llm_settings']['temperature'],
                     "model_project_id": data['llm_settings'].get('model_project_id'),
                 }
@@ -357,16 +390,18 @@ class AlitaClient:
             app_type = "react"
         elif app_type == 'autogen':
             app_type = "react"
-
+
         # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
         # The exception will propagate naturally to the indexer worker's outer handler
         if runtime == 'nonrunnable':
             return LangChainAssistant(self, data, llm, chat_history, app_type,
-                                      tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens)
+                                      tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens,
+                                      conversation_id=conversation_id)
         if runtime == 'langchain':
             return LangChainAssistant(self, data, llm,
                                       chat_history, app_type,
-                                      tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens).runnable()
+                                      tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens,
+                                      conversation_id=conversation_id).runnable()
         elif runtime == 'llama':
             raise NotImplementedError("LLama runtime is not supported")
 
@@ -587,7 +622,7 @@ class AlitaClient:
                       tools: Optional[list] = None, chat_history: Optional[List[Any]] = None,
                       memory=None, runtime='langchain', variables: Optional[list] = None,
                       store: Optional[BaseStore] = None, debug_mode: Optional[bool] = False,
-                      mcp_tokens: Optional[dict] = None):
+                      mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None):
         """
         Create a predict-type agent with minimal configuration.
 
@@ -623,7 +658,7 @@ class AlitaClient:
             'tools': tools,  # Tool configs that will be processed by get_tools()
             'variables': variables
         }
-
+
         # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
         # The exception will propagate naturally to the indexer worker's outer handler
         return LangChainAssistant(
@@ -635,12 +670,13 @@ class AlitaClient:
             memory=memory,
             store=store,
             debug_mode=debug_mode,
-            mcp_tokens=mcp_tokens
+            mcp_tokens=mcp_tokens,
+            conversation_id=conversation_id
         ).runnable()
 
     def test_toolkit_tool(self, toolkit_config: dict, tool_name: str, tool_params: dict = None,
                           runtime_config: dict = None, llm_model: str = None,
-                          llm_config: dict = None) -> dict:
+                          llm_config: dict = None, mcp_tokens: dict = None) -> dict:
         """
         Test a single tool from a toolkit with given parameters and runtime callbacks.
 
@@ -659,6 +695,7 @@ class AlitaClient:
                 - configurable: Additional configuration parameters
                 - tags: Tags for the execution
             llm_model: Name of the LLM model to use (default: 'gpt-4o-mini')
+            mcp_tokens: Optional dictionary of MCP OAuth tokens by server URL
             llm_config: Configuration for the LLM containing:
                 - max_tokens: Maximum tokens for response (default: 1000)
                 - temperature: Temperature for response generation (default: 0.1)
@@ -706,7 +743,6 @@ class AlitaClient:
             llm_config = {
                 'max_tokens': 1024,
                 'temperature': 0.1,
-                'top_p': 1.0
             }
         import logging
         logger = logging.getLogger(__name__)
@@ -778,12 +814,26 @@ class AlitaClient:
 
         # Instantiate the toolkit with client and LLM support
         try:
-            tools = instantiate_toolkit_with_client(toolkit_config, llm, self)
+            tools = instantiate_toolkit_with_client(toolkit_config, llm, self, mcp_tokens=mcp_tokens)
         except Exception as toolkit_error:
             # Re-raise McpAuthorizationRequired to allow proper handling upstream
             from ..utils.mcp_oauth import McpAuthorizationRequired
+
+            # Check if it's McpAuthorizationRequired directly
             if isinstance(toolkit_error, McpAuthorizationRequired):
+                logger.info("McpAuthorizationRequired detected, re-raising")
+                raise
+
+            # Also check for wrapped exceptions (e.g., from asyncio)
+            if hasattr(toolkit_error, '__cause__') and isinstance(toolkit_error.__cause__, McpAuthorizationRequired):
+                logger.info("Wrapped McpAuthorizationRequired detected, re-raising cause")
+                raise toolkit_error.__cause__
+
+            # Check exception class name as fallback (in case of module reload issues)
+            if toolkit_error.__class__.__name__ == 'McpAuthorizationRequired':
+                logger.info("McpAuthorizationRequired detected by name, re-raising")
                 raise
+
             # For other errors, return error response
             return {
                 "success": False,
@@ -891,7 +941,11 @@ class AlitaClient:
                     full_available_tools.append(tool_name_attr)
 
             # Create comprehensive error message
-            error_msg = f"Tool '{tool_name}' not found in toolkit '{toolkit_config.get('toolkit_name')}'."
+            error_msg = f"Tool '{tool_name}' not found in toolkit '{toolkit_config.get('toolkit_name')}'.\n"
+
+            # Custom error for index tools
+            if toolkit_name in [tool.value for tool in IndexTools]:
+                error_msg += " Please make sure proper PGVector configuration and embedding model are set in the platform.\n"
 
             if base_available_tools and full_available_tools:
                 error_msg += f" Available tools: {base_available_tools} (base names) or {full_available_tools} (full names)"
@@ -1013,6 +1067,10 @@ class AlitaClient:
             }
 
         except Exception as e:
+            # Re-raise McpAuthorizationRequired to allow proper handling upstream
+            from ..utils.mcp_oauth import McpAuthorizationRequired
+            if isinstance(e, McpAuthorizationRequired):
+                raise
             logger = logging.getLogger(__name__)
             logger.error(f"Error in test_toolkit_tool: {str(e)}")
             return {
alita_sdk/runtime/langchain/assistant.py

@@ -32,7 +32,8 @@ class Assistant:
                  memory: Optional[Any] = None,
                  store: Optional[BaseStore] = None,
                  debug_mode: Optional[bool] = False,
-                 mcp_tokens: Optional[dict] = None):
+                 mcp_tokens: Optional[dict] = None,
+                 conversation_id: Optional[str] = None):
 
         self.app_type = app_type
         self.memory = memory
@@ -96,7 +97,8 @@ class Assistant:
                 llm=self.client,
                 memory_store=self.store,
                 debug_mode=debug_mode,
-                mcp_tokens=mcp_tokens
+                mcp_tokens=mcp_tokens,
+                conversation_id=conversation_id
             )
             if tools:
                 self.tools += tools
alita_sdk/runtime/langchain/langraph_agent.py

@@ -12,6 +12,7 @@ from langchain_core.runnables import Runnable
 from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool, ToolException
 from langgraph.channels.ephemeral_value import EphemeralValue
+from langgraph.errors import GraphRecursionError
 from langgraph.graph import StateGraph
 from langgraph.graph.graph import END, START
 from langgraph.graph.state import CompiledStateGraph
@@ -171,12 +172,13 @@ Answer only with step name, no need to add descrip in case none of the steps are
 """
 
     def __init__(self, client, steps: str, description: str = "", decisional_inputs: Optional[list[str]] = [],
-                 default_output: str = 'END'):
+                 default_output: str = 'END', is_node: bool = False):
         self.client = client
         self.steps = ",".join([clean_string(step) for step in steps])
         self.description = description
         self.decisional_inputs = decisional_inputs
         self.default_output = default_output if default_output != 'END' else END
+        self.is_node = is_node
 
     def invoke(self, state: Annotated[BaseStore, InjectedStore()], config: Optional[RunnableConfig] = None) -> str:
         additional_info = ""
@@ -198,7 +200,8 @@ Answer only with step name, no need to add descrip in case none of the steps are
         dispatch_custom_event(
             "on_decision_edge", {"decisional_inputs": self.decisional_inputs, "state": state}, config=config
         )
-        return result
+        # support legacy `decision` used as part of a node
+        return {"router_output": result} if self.is_node else result
 
 
 class TransitionalEdge(Runnable):
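The new `is_node` flag lets the same `DecisionEdge` serve two LangGraph roles: as a conditional-edge callable it must return the route name itself, while as a node it must return a state update. A short sketch of the two return shapes (the `router_output` key follows the diff above):

    result = "step_two"  # route name picked by the LLM

    # as a conditional edge, LangGraph consumes the route name directly
    edge_return = result

    # as a node, LangGraph expects a state-update dict; the route is stashed
    # under a key that the follow-up conditional edge reads back
    node_return = {"router_output": result}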
@@ -636,15 +639,26 @@ def create_graph(
                 tool_names=tool_names,
                 steps_limit=kwargs.get('steps_limit', 25)
             ))
-        elif node_type == 'router':
-            # Add a RouterNode as an independent node
-            lg_builder.add_node(node_id, RouterNode(
-                name=node_id,
-                condition=node.get('condition', ''),
-                routes=node.get('routes', []),
-                default_output=node.get('default_output', 'END'),
-                input_variables=node.get('input', ['messages'])
-            ))
+        elif node_type in ['router', 'decision']:
+            if node_type == 'router':
+                # Add a RouterNode as an independent node
+                lg_builder.add_node(node_id, RouterNode(
+                    name=node_id,
+                    condition=node.get('condition', ''),
+                    routes=node.get('routes', []),
+                    default_output=node.get('default_output', 'END'),
+                    input_variables=node.get('input', ['messages'])
+                ))
+            elif node_type == 'decision':
+                logger.info(f'Adding decision: {node["nodes"]}')
+                lg_builder.add_node(node_id, DecisionEdge(
+                    client, node['nodes'],
+                    node.get('description', ""),
+                    decisional_inputs=node.get('decisional_inputs', ['messages']),
+                    default_output=node.get('default_output', 'END'),
+                    is_node=True
+                ))
+
             # Add a single conditional edge for all routes
             lg_builder.add_conditional_edges(
                 node_id,
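Both the `router` and `decision` branches above fall through to the single shared `add_conditional_edges` registration at the end of the hunk, so a `decision` node now participates in the same route wiring as a `router` node. A minimal, self-contained LangGraph sketch of that wiring pattern (node and route names are hypothetical, and the LLM call is stubbed out):

    from typing import TypedDict
    from langgraph.graph import StateGraph, END

    class State(TypedDict):
        router_output: str

    def decide(state: State) -> dict:
        # an LLM call would pick the next step here
        return {"router_output": "summarize"}

    builder = StateGraph(State)
    builder.add_node("decision", decide)
    builder.add_node("summarize", lambda state: state)
    builder.set_entry_point("decision")
    # a single conditional edge routes on the value the decision node stored in state
    builder.add_conditional_edges(
        "decision",
        lambda state: state["router_output"],
        {"summarize": "summarize", "END": END},
    )
    builder.add_edge("summarize", END)
    graph = builder.compile()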
@@ -820,11 +834,19 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         if not config.get("configurable", {}).get("thread_id", ""):
             config["configurable"] = {"thread_id": str(uuid4())}
         thread_id = config.get("configurable", {}).get("thread_id")
+
+        # Check if checkpoint exists early for chat_history handling
+        checkpoint_exists = self.checkpointer and self.checkpointer.get_tuple(config)
+
         # Handle chat history and current input properly
         if input.get('chat_history') and not input.get('messages'):
-            # Convert chat history dict messages to LangChain message objects
-            chat_history = input.pop('chat_history')
-            input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
+            if checkpoint_exists:
+                # Checkpoint already has conversation history - discard redundant chat_history
+                input.pop('chat_history', None)
+            else:
+                # No checkpoint - convert chat history dict messages to LangChain message objects
+                chat_history = input.pop('chat_history')
+                input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
 
         # handler for LLM node: if no input (Chat perspective), then take last human message
         # Track if input came from messages to handle content extraction properly
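When a checkpointer already holds the thread's messages, replaying the client-supplied `chat_history` would duplicate them, so the checkpoint now wins. A sketch of the guard in isolation; `convert_to_messages` from `langchain_core` stands in here for the SDK's own converter:

    from langchain_core.messages import convert_to_messages

    def merge_chat_history(payload: dict, checkpoint_exists: bool) -> dict:
        if payload.get('chat_history') and not payload.get('messages'):
            if checkpoint_exists:
                # the checkpoint is the source of truth; drop the client copy
                payload.pop('chat_history', None)
            else:
                # no checkpoint yet: seed graph state from the client history
                payload['messages'] = convert_to_messages(payload.pop('chat_history'))
        return payload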
@@ -872,6 +894,16 @@ class LangGraphAgentRunnable(CompiledStateGraph):
                 else:
                     # All content was text, remove this message from the list
                     input['messages'] = [msg for msg in input['messages'] if msg is not current_message]
+            else:
+                # Message came from input['input'], not from input['messages']
+                # If there are non-text parts (images, etc.), preserve them in messages
+                if non_text_parts:
+                    # Initialize messages if it doesn't exist or is empty
+                    if not input.get('messages'):
+                        input['messages'] = []
+                    # Create a new message with only non-text content
+                    non_text_message = HumanMessage(content=non_text_parts)
+                    input['messages'].append(non_text_message)
 
         elif isinstance(current_content, str):
             # on regenerate case
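The added `else` branch covers input that arrived via `input['input']` rather than `input['messages']`: the text becomes the prompt, while image-style parts are preserved as a standalone message in graph state. A sketch of that content split, using LangChain's content-part conventions:

    from langchain_core.messages import HumanMessage

    content = [
        {"type": "text", "text": "describe this image"},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
    ]
    text_parts = [part for part in content if part.get("type") == "text"]
    non_text_parts = [part for part in content if part.get("type") != "text"]
    if non_text_parts:
        # images survive as their own message instead of being dropped
        preserved = HumanMessage(content=non_text_parts)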
@@ -906,39 +938,98 @@ class LangGraphAgentRunnable(CompiledStateGraph):
         )
 
         logging.info(f"Input: {thread_id} - {input}")
-        if self.checkpointer and self.checkpointer.get_tuple(config):
-            self.update_state(config, input)
-            if config.pop("should_continue", False):
-                invoke_input = input
-            else:
-                invoke_input = None
-            result = super().invoke(invoke_input, config=config, *args, **kwargs)
-        else:
-            result = super().invoke(input, config=config, *args, **kwargs)
+        try:
+            if self.checkpointer and self.checkpointer.get_tuple(config):
+                if config.pop("should_continue", False):
+                    invoke_input = input
+                else:
+                    self.update_state(config, input)
+                    invoke_input = None
+                result = super().invoke(invoke_input, config=config, *args, **kwargs)
+            else:
+                result = super().invoke(input, config=config, *args, **kwargs)
+        except GraphRecursionError as e:
+            current_recursion_limit = config.get("recursion_limit", 0)
+            logger.warning("ToolExecutionLimitReached caught in LangGraphAgentRunnable: %s", e)
+            return self._handle_graph_recursion_error(
+                config=config,
+                thread_id=thread_id,
+                current_recursion_limit=current_recursion_limit,
+            )
+
         try:
-            if result.get(PRINTER_NODE_RS) == PRINTER_COMPLETED_STATE:
-                output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
-                              result['messages'][-1].content)
-            else:
-                # used for printer node output - it will be reset by next `reset` node
-                output = result.get(PRINTER_NODE_RS)
-        except:
-            output = list(result.values())[-1]
+            # Check if printer node output exists
+            printer_output = result.get(PRINTER_NODE_RS)
+            if printer_output == PRINTER_COMPLETED_STATE:
+                # Printer completed, extract last AI message
+                messages = result['messages']
+                output = next(
+                    (msg.content for msg in reversed(messages)
+                     if not isinstance(msg, HumanMessage)),
+                    messages[-1].content
+                )
+            elif printer_output is not None:
+                # Printer node has output (interrupted state)
+                output = printer_output
+            else:
+                # No printer node, extract last AI message from messages
+                messages = result.get('messages', [])
+                output = next(
+                    (msg.content for msg in reversed(messages)
+                     if not isinstance(msg, HumanMessage)),
+                    None
+                )
+        except Exception:
+            # Fallback: try to get last value or last message
+            output = list(result.values())[-1] if result else None
         config_state = self.get_state(config)
         is_execution_finished = not config_state.next
         if is_execution_finished:
             thread_id = None
 
+        final_output = f"Assistant run has been completed, but output is None.\nAdding last message if any: {messages[-1] if messages else []}" if is_execution_finished and output is None else output
+
         result_with_state = {
-            "output": output,
+            "output": final_output,
             "thread_id": thread_id,
             "execution_finished": is_execution_finished
         }
 
         # Include all state values in the result
         if hasattr(config_state, 'values') and config_state.values:
+            # except for the 'output' key, which is already included
+            for key, value in config_state.values.items():
+                if key != 'output':
+                    result_with_state[key] = value
+
+        return result_with_state
+
+    def _handle_graph_recursion_error(
+        self,
+        config: RunnableConfig,
+        thread_id: str,
+        current_recursion_limit: int,
+    ) -> dict:
+        """Handle GraphRecursionError by returning a soft-boundary response."""
+        config_state = self.get_state(config)
+        is_execution_finished = False
+
+        friendly_output = (
+            f"Tool step limit {current_recursion_limit} reached for this run. You can continue by sending another "
+            "message or refining your request."
+        )
+
+        result_with_state: dict[str, Any] = {
+            "output": friendly_output,
+            "thread_id": thread_id,
+            "execution_finished": is_execution_finished,
+            "tool_execution_limit_reached": True,
+        }
+
+        if hasattr(config_state, "values") and config_state.values:
             for key, value in config_state.values.items():
-                result_with_state[key] = value
+                if key != "output":
+                    result_with_state[key] = value
 
         return result_with_state
 
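`GraphRecursionError` is what LangGraph raises when a run exceeds its `recursion_limit`; the change above converts it into a soft "step limit reached" result instead of letting it propagate. For contrast, a caller-side sketch of the two behaviors, reusing the `graph` compiled in the earlier wiring sketch:

    from langgraph.errors import GraphRecursionError

    config = {"recursion_limit": 25, "configurable": {"thread_id": "demo"}}
    try:
        result = graph.invoke({"router_output": ""}, config=config)
    except GraphRecursionError:
        # with the change above, this branch moves inside invoke() and the
        # caller instead receives a dict shaped roughly like:
        result = {
            "output": "Tool step limit 25 reached for this run. ...",
            "execution_finished": False,
            "tool_execution_limit_reached": True,
        }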
alita_sdk/runtime/llms/preloaded.py

@@ -105,8 +105,7 @@ class PreloadedChatModel(BaseChatModel):  # pylint: disable=R0903
     model_name: str = ""
     max_tokens: Optional[int] = 256
     temperature: Optional[float] = 0.9
-    top_p: Optional[float] = 0.9
-    top_k: Optional[int] = 20
+    reasoning_effort: Optional[str] = None
     token_limit: Optional[int] = 1024
 
     _local_streams: Any = PrivateAttr()
@@ -252,8 +251,7 @@ class PreloadedChatModel(BaseChatModel):  # pylint: disable=R0903
             "return_full_text": False,
             "temperature": self.temperature,
             "do_sample": True,
-            "top_k": self.top_k,
-            "top_p": self.top_p,
+            "reasoning_effort": self.reasoning_effort
         }
         #
         try:
@@ -302,8 +300,6 @@ class PreloadedChatModel(BaseChatModel):  # pylint: disable=R0903
             "return_full_text": False,
             "temperature": self.temperature,
             "do_sample": True,
-            "top_k": self.top_k,
-            "top_p": self.top_p,
         }
         #
         while True:
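Note the replaced knobs differ in kind: `top_p`/`top_k` shaped token sampling, while `reasoning_effort` selects a thinking budget on reasoning-capable models (typically "low", "medium", or "high"); whether a preloaded backend honors it depends on the serving stack. The field change, mirrored on a minimal pydantic model:

    from typing import Optional
    from pydantic import BaseModel

    class ModelParams(BaseModel):
        max_tokens: Optional[int] = 256
        temperature: Optional[float] = 0.9
        reasoning_effort: Optional[str] = None  # e.g. "medium"; replaces top_p/top_k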
alita_sdk/runtime/toolkits/__init__.py

@@ -6,6 +6,7 @@ This module provides various toolkit implementations for LangGraph agents.
 from .application import ApplicationToolkit
 from .artifact import ArtifactToolkit
 from .datasource import DatasourcesToolkit
+from .planning import PlanningToolkit
 from .prompt import PromptToolkit
 from .subgraph import SubgraphToolkit
 from .vectorstore import VectorStoreToolkit
@@ -16,6 +17,7 @@ __all__ = [
     "ApplicationToolkit",
     "ArtifactToolkit",
     "DatasourcesToolkit",
+    "PlanningToolkit",
     "PromptToolkit",
     "SubgraphToolkit",
     "VectorStoreToolkit",
alita_sdk/runtime/toolkits/application.py

@@ -28,7 +28,7 @@ class ApplicationToolkit(BaseToolkit):
         version_details = client.get_app_version_details(application_id, application_version_id)
         model_settings = {
             "max_tokens": version_details['llm_settings']['max_tokens'],
-            "top_p": version_details['llm_settings']['top_p'],
+            "reasoning_effort": version_details['llm_settings'].get('reasoning_effort'),
             "temperature": version_details['llm_settings']['temperature'],
         }