alita-sdk 0.3.457__py3-none-any.whl → 0.3.486__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

This release of alita-sdk has been flagged as potentially problematic.

Files changed (102)
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent/__init__.py +5 -0
  4. alita_sdk/cli/agent/default.py +258 -0
  5. alita_sdk/cli/agent_executor.py +155 -0
  6. alita_sdk/cli/agent_loader.py +194 -0
  7. alita_sdk/cli/agent_ui.py +228 -0
  8. alita_sdk/cli/agents.py +3592 -0
  9. alita_sdk/cli/callbacks.py +647 -0
  10. alita_sdk/cli/cli.py +168 -0
  11. alita_sdk/cli/config.py +306 -0
  12. alita_sdk/cli/context/__init__.py +30 -0
  13. alita_sdk/cli/context/cleanup.py +198 -0
  14. alita_sdk/cli/context/manager.py +731 -0
  15. alita_sdk/cli/context/message.py +285 -0
  16. alita_sdk/cli/context/strategies.py +289 -0
  17. alita_sdk/cli/context/token_estimation.py +127 -0
  18. alita_sdk/cli/formatting.py +182 -0
  19. alita_sdk/cli/input_handler.py +419 -0
  20. alita_sdk/cli/inventory.py +1256 -0
  21. alita_sdk/cli/mcp_loader.py +315 -0
  22. alita_sdk/cli/toolkit.py +327 -0
  23. alita_sdk/cli/toolkit_loader.py +85 -0
  24. alita_sdk/cli/tools/__init__.py +43 -0
  25. alita_sdk/cli/tools/approval.py +224 -0
  26. alita_sdk/cli/tools/filesystem.py +1665 -0
  27. alita_sdk/cli/tools/planning.py +389 -0
  28. alita_sdk/cli/tools/terminal.py +414 -0
  29. alita_sdk/community/__init__.py +64 -8
  30. alita_sdk/community/inventory/__init__.py +224 -0
  31. alita_sdk/community/inventory/config.py +257 -0
  32. alita_sdk/community/inventory/enrichment.py +2137 -0
  33. alita_sdk/community/inventory/extractors.py +1469 -0
  34. alita_sdk/community/inventory/ingestion.py +3172 -0
  35. alita_sdk/community/inventory/knowledge_graph.py +1457 -0
  36. alita_sdk/community/inventory/parsers/__init__.py +218 -0
  37. alita_sdk/community/inventory/parsers/base.py +295 -0
  38. alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
  39. alita_sdk/community/inventory/parsers/go_parser.py +851 -0
  40. alita_sdk/community/inventory/parsers/html_parser.py +389 -0
  41. alita_sdk/community/inventory/parsers/java_parser.py +593 -0
  42. alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
  43. alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
  44. alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
  45. alita_sdk/community/inventory/parsers/python_parser.py +604 -0
  46. alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
  47. alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
  48. alita_sdk/community/inventory/parsers/text_parser.py +322 -0
  49. alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
  50. alita_sdk/community/inventory/patterns/__init__.py +61 -0
  51. alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
  52. alita_sdk/community/inventory/patterns/loader.py +348 -0
  53. alita_sdk/community/inventory/patterns/registry.py +198 -0
  54. alita_sdk/community/inventory/presets.py +535 -0
  55. alita_sdk/community/inventory/retrieval.py +1403 -0
  56. alita_sdk/community/inventory/toolkit.py +169 -0
  57. alita_sdk/community/inventory/visualize.py +1370 -0
  58. alita_sdk/configurations/bitbucket.py +0 -3
  59. alita_sdk/runtime/clients/client.py +99 -26
  60. alita_sdk/runtime/langchain/assistant.py +4 -2
  61. alita_sdk/runtime/langchain/constants.py +2 -1
  62. alita_sdk/runtime/langchain/langraph_agent.py +134 -31
  63. alita_sdk/runtime/langchain/utils.py +1 -1
  64. alita_sdk/runtime/llms/preloaded.py +2 -6
  65. alita_sdk/runtime/toolkits/__init__.py +2 -0
  66. alita_sdk/runtime/toolkits/application.py +1 -1
  67. alita_sdk/runtime/toolkits/mcp.py +46 -36
  68. alita_sdk/runtime/toolkits/planning.py +171 -0
  69. alita_sdk/runtime/toolkits/tools.py +39 -6
  70. alita_sdk/runtime/tools/function.py +17 -5
  71. alita_sdk/runtime/tools/llm.py +249 -14
  72. alita_sdk/runtime/tools/planning/__init__.py +36 -0
  73. alita_sdk/runtime/tools/planning/models.py +246 -0
  74. alita_sdk/runtime/tools/planning/wrapper.py +607 -0
  75. alita_sdk/runtime/tools/vectorstore_base.py +41 -6
  76. alita_sdk/runtime/utils/mcp_oauth.py +80 -0
  77. alita_sdk/runtime/utils/streamlit.py +6 -10
  78. alita_sdk/runtime/utils/toolkit_utils.py +19 -4
  79. alita_sdk/tools/__init__.py +54 -27
  80. alita_sdk/tools/ado/repos/repos_wrapper.py +1 -2
  81. alita_sdk/tools/base_indexer_toolkit.py +150 -19
  82. alita_sdk/tools/bitbucket/__init__.py +2 -2
  83. alita_sdk/tools/chunkers/__init__.py +3 -1
  84. alita_sdk/tools/chunkers/sematic/markdown_chunker.py +95 -6
  85. alita_sdk/tools/chunkers/universal_chunker.py +269 -0
  86. alita_sdk/tools/code_indexer_toolkit.py +55 -22
  87. alita_sdk/tools/elitea_base.py +86 -21
  88. alita_sdk/tools/jira/__init__.py +1 -1
  89. alita_sdk/tools/jira/api_wrapper.py +91 -40
  90. alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
  91. alita_sdk/tools/qtest/__init__.py +1 -1
  92. alita_sdk/tools/qtest/api_wrapper.py +871 -32
  93. alita_sdk/tools/sharepoint/api_wrapper.py +22 -2
  94. alita_sdk/tools/sharepoint/authorization_helper.py +17 -1
  95. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +8 -2
  96. alita_sdk/tools/zephyr_essential/api_wrapper.py +12 -13
  97. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.486.dist-info}/METADATA +146 -2
  98. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.486.dist-info}/RECORD +102 -40
  99. alita_sdk-0.3.486.dist-info/entry_points.txt +2 -0
  100. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.486.dist-info}/WHEEL +0 -0
  101. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.486.dist-info}/licenses/LICENSE +0 -0
  102. {alita_sdk-0.3.457.dist-info → alita_sdk-0.3.486.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,3 @@
- from typing import Optional
-
- from atlassian import Bitbucket
  from pydantic import BaseModel, ConfigDict, Field, SecretStr


@@ -22,6 +22,7 @@ from .artifact import Artifact
  from ..langchain.chat_message_template import Jinja2TemplatedChatMessagesTemplate
  from ..utils.utils import TOOLKIT_SPLITTER
  from ...tools import get_available_toolkit_models
+ from ...tools.base_indexer_toolkit import IndexTools

  logger = logging.getLogger(__name__)

@@ -68,6 +69,7 @@ class AlitaClient:
  self.bucket_url = f"{self.base_url}{self.api_path}/artifacts/buckets/{self.project_id}"
  self.configurations_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=configurations&unsecret=true'
  self.ai_section_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=ai'
+ self.models_url = f'{self.base_url}{self.api_path}/configurations/models/{self.project_id}?include_shared=true'
  self.image_generation_url = f"{self.base_url}{self.llm_path}/images/generations"
  self.configurations: list = configurations or []
  self.model_timeout = kwargs.get('model_timeout', 120)
@@ -175,6 +177,20 @@ class AlitaClient:
  return resp.json()
  return []

+ def get_available_models(self):
+ """Get list of available models from the configurations API.
+
+ Returns:
+ List of model dictionaries with 'name' and other properties,
+ or empty list if request fails.
+ """
+ resp = requests.get(self.models_url, headers=self.headers, verify=False)
+ if resp.ok:
+ data = resp.json()
+ # API returns {"items": [...], ...}
+ return data.get('items', [])
+ return []
+
  def get_embeddings(self, embedding_model: str) -> OpenAIEmbeddings:
  """
  Get an instance of OpenAIEmbeddings configured with the project ID and auth token.
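For illustration, a minimal sketch of calling the new models endpoint from client code. The import path follows the file list above; the constructor arguments and connection values are hypothetical, and get_available_models() returns the "items" list or [] as documented in the hunk:

    from alita_sdk.runtime.clients.client import AlitaClient

    # Hypothetical connection values; the real constructor may take more arguments.
    client = AlitaClient(
        base_url="https://example.elitea.host",
        project_id=42,
        auth_token="SECRET_TOKEN",
    )

    # Lists models from /configurations/models/<project_id>?include_shared=true
    for model in client.get_available_models():
        print(model.get("name"))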
@@ -206,18 +222,45 @@ class AlitaClient:

  logger.info(f"Creating ChatOpenAI model: {model_name} with config: {model_config}")

- return ChatOpenAI(
- base_url=f"{self.base_url}{self.llm_path}",
- model=model_name,
- api_key=self.auth_token,
- streaming=model_config.get("streaming", True),
- stream_usage=model_config.get("stream_usage", True),
- max_tokens=model_config.get("max_tokens", None),
- temperature=model_config.get("temperature"),
- max_retries=model_config.get("max_retries", 3),
- seed=model_config.get("seed", None),
- openai_organization=str(self.project_id),
- )
+ try:
+ from tools import this # pylint: disable=E0401,C0415
+ worker_config = this.for_module("indexer_worker").descriptor.config
+ except: # pylint: disable=W0702
+ worker_config = {}
+
+ use_responses_api = False
+
+ if worker_config and isinstance(worker_config, dict):
+ for target_name_tag in worker_config.get("use_responses_api_for", []):
+ if target_name_tag in model_name:
+ use_responses_api = True
+ break
+
+ # handle case when max_tokens are auto-configurable == -1
+ llm_max_tokens = model_config.get("max_tokens", None)
+ if llm_max_tokens and llm_max_tokens == -1:
+ logger.warning(f'User selected `MAX COMPLETION TOKENS` as `auto`')
+ # default nuber for a case when auto is selected for an agent
+ llm_max_tokens = 4000
+
+ target_kwargs = {
+ "base_url": f"{self.base_url}{self.llm_path}",
+ "model": model_name,
+ "api_key": self.auth_token,
+ "streaming": model_config.get("streaming", True),
+ "stream_usage": model_config.get("stream_usage", True),
+ "max_tokens": llm_max_tokens,
+ "temperature": model_config.get("temperature"),
+ "reasoning_effort": model_config.get("reasoning_effort"),
+ "max_retries": model_config.get("max_retries", 3),
+ "seed": model_config.get("seed", None),
+ "openai_organization": str(self.project_id),
+ }
+
+ if use_responses_api:
+ target_kwargs["use_responses_api"] = True
+
+ return ChatOpenAI(**target_kwargs)

  def generate_image(self,
  prompt: str,
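Two behaviors in this hunk are worth reading in isolation: a max_tokens of -1 (the platform's 'auto' marker) is replaced with a fixed default of 4000, and use_responses_api is only set when a configured tag appears in the model name. A standalone sketch of that selection logic, assuming the same config shapes (not the shipped implementation):

    def build_llm_kwargs(model_name: str, model_config: dict, worker_config: dict) -> dict:
        # Sketch of the logic above; config shapes are taken from the hunk.
        max_tokens = model_config.get("max_tokens")
        if max_tokens == -1:  # 'auto' selected in the UI
            max_tokens = 4000
        kwargs = {"model": model_name, "max_tokens": max_tokens}
        if any(tag in model_name for tag in worker_config.get("use_responses_api_for", [])):
            kwargs["use_responses_api"] = True
        return kwargs

    assert build_llm_kwargs(
        "gpt-5-mini", {"max_tokens": -1}, {"use_responses_api_for": ["gpt-5"]}
    ) == {"model": "gpt-5-mini", "max_tokens": 4000, "use_responses_api": True}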
@@ -303,7 +346,8 @@ class AlitaClient:
  app_type=None, memory=None, runtime='langchain',
  application_variables: Optional[dict] = None,
  version_details: Optional[dict] = None, store: Optional[BaseStore] = None,
- llm: Optional[ChatOpenAI] = None, mcp_tokens: Optional[dict] = None):
+ llm: Optional[ChatOpenAI] = None, mcp_tokens: Optional[dict] = None,
+ conversation_id: Optional[str] = None):
  if tools is None:
  tools = []
  if chat_history is None:
@@ -323,11 +367,15 @@ class AlitaClient:
  if var['name'] in application_variables:
  var.update(application_variables[var['name']])
  if llm is None:
+ max_tokens = data['llm_settings'].get('max_tokens', 4000)
+ if max_tokens == -1:
+ # default nuber for case when auto is selected for agent
+ max_tokens = 4000
  llm = self.get_llm(
  model_name=data['llm_settings']['model_name'],
  model_config={
- "max_tokens": data['llm_settings']['max_tokens'],
- "top_p": data['llm_settings']['top_p'],
+ "max_tokens": max_tokens,
+ "reasoning_effort": data['llm_settings'].get('reasoning_effort'),
  "temperature": data['llm_settings']['temperature'],
  "model_project_id": data['llm_settings'].get('model_project_id'),
  }
@@ -342,16 +390,18 @@ class AlitaClient:
  app_type = "react"
  elif app_type == 'autogen':
  app_type = "react"
-
+
  # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
  # The exception will propagate naturally to the indexer worker's outer handler
  if runtime == 'nonrunnable':
  return LangChainAssistant(self, data, llm, chat_history, app_type,
- tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens)
+ tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens,
+ conversation_id=conversation_id)
  if runtime == 'langchain':
  return LangChainAssistant(self, data, llm,
  chat_history, app_type,
- tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens).runnable()
+ tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens,
+ conversation_id=conversation_id).runnable()
  elif runtime == 'llama':
  raise NotImplementedError("LLama runtime is not supported")

@@ -565,14 +615,14 @@ class AlitaClient:
  monitoring_meta = tasknode_task.meta.get("monitoring", {})
  return monitoring_meta["user_id"]
  except Exception as e:
- logger.warning(f"Error: Could not determine user ID for MCP tool: {e}")
+ logger.debug(f"Error: Could not determine user ID for MCP tool: {e}")
  return None

  def predict_agent(self, llm: ChatOpenAI, instructions: str = "You are a helpful assistant.",
  tools: Optional[list] = None, chat_history: Optional[List[Any]] = None,
  memory=None, runtime='langchain', variables: Optional[list] = None,
  store: Optional[BaseStore] = None, debug_mode: Optional[bool] = False,
- mcp_tokens: Optional[dict] = None):
+ mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None):
  """
  Create a predict-type agent with minimal configuration.

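The new conversation_id parameter is threaded from both application entry points and predict_agent into LangChainAssistant. A hedged usage sketch; the client is the one from the earlier example, the model name and ID are placeholders, and the input shape follows the invoke() handling shown later in this diff:

    # Sketch: `client` is an authenticated AlitaClient as above.
    llm = client.get_llm(
        model_name="gpt-4o-mini",
        model_config={"max_tokens": 1024, "temperature": 0.1},
    )
    agent = client.predict_agent(
        llm,
        instructions="You are a helpful assistant.",
        conversation_id="conv-123",  # hypothetical conversation identifier
    )
    result = agent.invoke({"input": "Hello"})
    print(result["output"])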
@@ -608,7 +658,7 @@
  'tools': tools, # Tool configs that will be processed by get_tools()
  'variables': variables
  }
-
+
  # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
  # The exception will propagate naturally to the indexer worker's outer handler
  return LangChainAssistant(
@@ -620,12 +670,13 @@
  memory=memory,
  store=store,
  debug_mode=debug_mode,
- mcp_tokens=mcp_tokens
+ mcp_tokens=mcp_tokens,
+ conversation_id=conversation_id
  ).runnable()

  def test_toolkit_tool(self, toolkit_config: dict, tool_name: str, tool_params: dict = None,
  runtime_config: dict = None, llm_model: str = None,
- llm_config: dict = None) -> dict:
+ llm_config: dict = None, mcp_tokens: dict = None) -> dict:
  """
  Test a single tool from a toolkit with given parameters and runtime callbacks.

@@ -644,6 +695,7 @@
  - configurable: Additional configuration parameters
  - tags: Tags for the execution
  llm_model: Name of the LLM model to use (default: 'gpt-4o-mini')
+ mcp_tokens: Optional dictionary of MCP OAuth tokens by server URL
  llm_config: Configuration for the LLM containing:
  - max_tokens: Maximum tokens for response (default: 1000)
  - temperature: Temperature for response generation (default: 0.1)
@@ -691,7 +743,6 @@
  llm_config = {
  'max_tokens': 1024,
  'temperature': 0.1,
- 'top_p': 1.0
  }
  import logging
  logger = logging.getLogger(__name__)
@@ -763,12 +814,26 @@

  # Instantiate the toolkit with client and LLM support
  try:
- tools = instantiate_toolkit_with_client(toolkit_config, llm, self)
+ tools = instantiate_toolkit_with_client(toolkit_config, llm, self, mcp_tokens=mcp_tokens)
  except Exception as toolkit_error:
  # Re-raise McpAuthorizationRequired to allow proper handling upstream
  from ..utils.mcp_oauth import McpAuthorizationRequired
+
+ # Check if it's McpAuthorizationRequired directly
  if isinstance(toolkit_error, McpAuthorizationRequired):
+ logger.info(f"McpAuthorizationRequired detected, re-raising")
  raise
+
+ # Also check for wrapped exceptions (e.g., from asyncio)
+ if hasattr(toolkit_error, '__cause__') and isinstance(toolkit_error.__cause__, McpAuthorizationRequired):
+ logger.info(f"Wrapped McpAuthorizationRequired detected, re-raising cause")
+ raise toolkit_error.__cause__
+
+ # Check exception class name as fallback (in case of module reload issues)
+ if toolkit_error.__class__.__name__ == 'McpAuthorizationRequired':
+ logger.info(f"McpAuthorizationRequired detected by name, re-raising")
+ raise
+
  # For other errors, return error response
  return {
  "success": False,
@@ -876,7 +941,11 @@
  full_available_tools.append(tool_name_attr)

  # Create comprehensive error message
- error_msg = f"Tool '{tool_name}' not found in toolkit '{toolkit_config.get('toolkit_name')}'."
+ error_msg = f"Tool '{tool_name}' not found in toolkit '{toolkit_config.get('toolkit_name')}'.\n"
+
+ # Custom error for index tools
+ if toolkit_name in [tool.value for tool in IndexTools]:
+ error_msg += f" Please make sure proper PGVector configuration and embedding model are set in the platform.\n"

  if base_available_tools and full_available_tools:
  error_msg += f" Available tools: {base_available_tools} (base names) or {full_available_tools} (full names)"
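The triple check in the toolkit-instantiation hunk above (direct instance, wrapped __cause__, class-name fallback) is a general pattern for recognizing an exception that may arrive re-wrapped by asyncio or duplicated by a module reload. Distilled into a self-contained sketch, with a stand-in exception class (this helper is not part of the SDK):

    def matches_exception(exc: BaseException, target: type) -> bool:
        """True if exc is target, wraps it as __cause__, or shares its class name."""
        if isinstance(exc, target):
            return True
        if isinstance(exc.__cause__, target):
            return True
        return exc.__class__.__name__ == target.__name__

    class AuthRequired(Exception):
        pass

    try:
        try:
            raise AuthRequired("token expired")
        except AuthRequired as inner:
            raise RuntimeError("toolkit failed") from inner
    except RuntimeError as wrapped:
        assert matches_exception(wrapped, AuthRequired)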
@@ -998,6 +1067,10 @@
  }

  except Exception as e:
+ # Re-raise McpAuthorizationRequired to allow proper handling upstream
+ from ..utils.mcp_oauth import McpAuthorizationRequired
+ if isinstance(e, McpAuthorizationRequired):
+ raise
  logger = logging.getLogger(__name__)
  logger.error(f"Error in test_toolkit_tool: {str(e)}")
  return {
@@ -32,7 +32,8 @@ class Assistant:
  memory: Optional[Any] = None,
  store: Optional[BaseStore] = None,
  debug_mode: Optional[bool] = False,
- mcp_tokens: Optional[dict] = None):
+ mcp_tokens: Optional[dict] = None,
+ conversation_id: Optional[str] = None):

  self.app_type = app_type
  self.memory = memory
@@ -96,7 +97,8 @@
  llm=self.client,
  memory_store=self.store,
  debug_mode=debug_mode,
- mcp_tokens=mcp_tokens
+ mcp_tokens=mcp_tokens,
+ conversation_id=conversation_id
  )
  if tools:
  self.tools += tools
@@ -83,4 +83,5 @@ DEFAULT_MULTIMODAL_PROMPT = """

  ELITEA_RS = "elitea_response"
  PRINTER = "printer"
- PRINTER_NODE_RS = "printer_output"
+ PRINTER_NODE_RS = "printer_output"
+ PRINTER_COMPLETED_STATE = "PRINTER_COMPLETED"
@@ -12,6 +12,7 @@ from langchain_core.runnables import Runnable
  from langchain_core.runnables import RunnableConfig
  from langchain_core.tools import BaseTool, ToolException
  from langgraph.channels.ephemeral_value import EphemeralValue
+ from langgraph.errors import GraphRecursionError
  from langgraph.graph import StateGraph
  from langgraph.graph.graph import END, START
  from langgraph.graph.state import CompiledStateGraph
@@ -19,7 +20,7 @@ from langgraph.managed.base import is_managed_value
  from langgraph.prebuilt import InjectedStore
  from langgraph.store.base import BaseStore

- from .constants import PRINTER_NODE_RS, PRINTER
+ from .constants import PRINTER_NODE_RS, PRINTER, PRINTER_COMPLETED_STATE
  from .mixedAgentRenderes import convert_message_to_json
  from .utils import create_state, propagate_the_input_mapping, safe_format
  from ..tools.function import FunctionTool
@@ -171,12 +172,13 @@ Answer only with step name, no need to add descrip in case none of the steps are
  """

  def __init__(self, client, steps: str, description: str = "", decisional_inputs: Optional[list[str]] = [],
- default_output: str = 'END'):
+ default_output: str = 'END', is_node: bool = False):
  self.client = client
  self.steps = ",".join([clean_string(step) for step in steps])
  self.description = description
  self.decisional_inputs = decisional_inputs
  self.default_output = default_output if default_output != 'END' else END
+ self.is_node = is_node

  def invoke(self, state: Annotated[BaseStore, InjectedStore()], config: Optional[RunnableConfig] = None) -> str:
  additional_info = ""
@@ -198,7 +200,8 @@ Answer only with step name, no need to add descrip in case none of the steps are
  dispatch_custom_event(
  "on_decision_edge", {"decisional_inputs": self.decisional_inputs, "state": state}, config=config
  )
- return result
+ # support of legacy `decision` as part of node
+ return {"router_output": result} if self.is_node else result


  class TransitionalEdge(Runnable):
@@ -244,11 +247,19 @@ class PrinterNode(Runnable):
  result = {}
  logger.debug(f"Initial text pattern: {self.input_mapping}")
  mapping = propagate_the_input_mapping(self.input_mapping, [], state)
+ # for printer node we expect that all the lists will be joined into strings already
+ # Join any lists that haven't been converted yet
+ for key, value in mapping.items():
+ if isinstance(value, list):
+ mapping[key] = ', '.join(str(item) for item in value)
  if mapping.get(PRINTER) is None:
  raise ToolException(f"PrinterNode requires '{PRINTER}' field in input mapping")
  formatted_output = mapping[PRINTER]
  # add info label to the printer's output
- if formatted_output:
+ if not formatted_output == PRINTER_COMPLETED_STATE:
+ # convert formatted output to string if it's not
+ if not isinstance(formatted_output, str):
+ formatted_output = str(formatted_output)
  formatted_output += f"\n\n-----\n*How to proceed?*\n* *to resume the pipeline - type anything...*"
  logger.debug(f"Formatted output: {formatted_output}")
  result[PRINTER_NODE_RS] = formatted_output
@@ -628,15 +639,26 @@ def create_graph(
  tool_names=tool_names,
  steps_limit=kwargs.get('steps_limit', 25)
  ))
- elif node_type == 'router':
- # Add a RouterNode as an independent node
- lg_builder.add_node(node_id, RouterNode(
- name=node_id,
- condition=node.get('condition', ''),
- routes=node.get('routes', []),
- default_output=node.get('default_output', 'END'),
- input_variables=node.get('input', ['messages'])
- ))
+ elif node_type in ['router', 'decision']:
+ if node_type == 'router':
+ # Add a RouterNode as an independent node
+ lg_builder.add_node(node_id, RouterNode(
+ name=node_id,
+ condition=node.get('condition', ''),
+ routes=node.get('routes', []),
+ default_output=node.get('default_output', 'END'),
+ input_variables=node.get('input', ['messages'])
+ ))
+ elif node_type == 'decision':
+ logger.info(f'Adding decision: {node["nodes"]}')
+ lg_builder.add_node(node_id, DecisionEdge(
+ client, node['nodes'],
+ node.get('description', ""),
+ decisional_inputs=node.get('decisional_inputs', ['messages']),
+ default_output=node.get('default_output', 'END'),
+ is_node=True
+ ))
+
  # Add a single conditional edge for all routes
  lg_builder.add_conditional_edges(
  node_id,
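With this change a legacy 'decision' can be declared directly as a node (DecisionEdge with is_node=True returns {"router_output": ...} instead of a bare string, as seen in the earlier hunk). A hedged example of a node spec that would take this branch; the field names come from the code above, while the id key and the concrete values are assumptions:

    # Illustrative node spec for create_graph; values are hypothetical.
    decision_node = {
        "id": "triage",                    # assumed key backing node_id
        "type": "decision",
        "nodes": ["search", "summarize"],  # candidate next steps
        "description": "Pick the next step based on the last user message.",
        "decisional_inputs": ["messages"],
        "default_output": "END",
    }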
@@ -666,7 +688,7 @@
  # reset printer output variable to avoid carrying over
  reset_node_id = f"{node_id}_reset"
  lg_builder.add_node(reset_node_id, PrinterNode(
- input_mapping={'printer': {'type': 'fixed', 'value': ''}}
+ input_mapping={'printer': {'type': 'fixed', 'value': PRINTER_COMPLETED_STATE}}
  ))
  lg_builder.add_conditional_edges(node_id, TransitionalEdge(reset_node_id))
  lg_builder.add_conditional_edges(reset_node_id, TransitionalEdge(clean_string(node['transition'])))
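The reset node now writes the PRINTER_COMPLETED_STATE sentinel instead of an empty string, which lets invoke() (further below) distinguish "printer finished, fall back to the last AI message" from "printer output pending". In sketch form:

    PRINTER_COMPLETED_STATE = "PRINTER_COMPLETED"

    def pick_output(printer_output, last_ai_message):
        # Sketch of the three-way check performed in invoke() below.
        if printer_output == PRINTER_COMPLETED_STATE:
            return last_ai_message      # printer done, use the normal output
        if printer_output is not None:
            return printer_output       # interrupted: surface the printer text
        return last_ai_message          # no printer node in the graph

    assert pick_output(PRINTER_COMPLETED_STATE, "done") == "done"
    assert pick_output("waiting for user input", "done") == "waiting for user input"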
@@ -812,11 +834,19 @@ class LangGraphAgentRunnable(CompiledStateGraph):
  if not config.get("configurable", {}).get("thread_id", ""):
  config["configurable"] = {"thread_id": str(uuid4())}
  thread_id = config.get("configurable", {}).get("thread_id")
+
+ # Check if checkpoint exists early for chat_history handling
+ checkpoint_exists = self.checkpointer and self.checkpointer.get_tuple(config)
+
  # Handle chat history and current input properly
  if input.get('chat_history') and not input.get('messages'):
- # Convert chat history dict messages to LangChain message objects
- chat_history = input.pop('chat_history')
- input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
+ if checkpoint_exists:
+ # Checkpoint already has conversation history - discard redundant chat_history
+ input.pop('chat_history', None)
+ else:
+ # No checkpoint - convert chat history dict messages to LangChain message objects
+ chat_history = input.pop('chat_history')
+ input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]

  # handler for LLM node: if no input (Chat perspective), then take last human message
  # Track if input came from messages to handle content extraction properly
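The guard means chat_history only seeds the message list on a thread's first turn; once a checkpoint exists it is the source of truth and the duplicate history is dropped. A standalone sketch of the branch, assuming the same input dict shape (to_message stands in for convert_dict_to_message):

    def merge_chat_history(payload: dict, checkpoint_exists: bool, to_message) -> dict:
        # Sketch of the branch above, not the shipped code.
        if payload.get("chat_history") and not payload.get("messages"):
            if checkpoint_exists:
                payload.pop("chat_history", None)  # checkpoint already holds it
            else:
                payload["messages"] = [to_message(m) for m in payload.pop("chat_history")]
        return payload

    first_turn = merge_chat_history(
        {"chat_history": [{"role": "user", "content": "hi"}]},
        checkpoint_exists=False,
        to_message=dict,
    )
    assert first_turn == {"messages": [{"role": "user", "content": "hi"}]}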
@@ -864,6 +894,16 @@
  else:
  # All content was text, remove this message from the list
  input['messages'] = [msg for msg in input['messages'] if msg is not current_message]
+ else:
+ # Message came from input['input'], not from input['messages']
+ # If there are non-text parts (images, etc.), preserve them in messages
+ if non_text_parts:
+ # Initialize messages if it doesn't exist or is empty
+ if not input.get('messages'):
+ input['messages'] = []
+ # Create a new message with only non-text content
+ non_text_message = HumanMessage(content=non_text_parts)
+ input['messages'].append(non_text_message)

  elif isinstance(current_content, str):
  # on regenerate case
@@ -898,35 +938,98 @@
  )

  logging.info(f"Input: {thread_id} - {input}")
- if self.checkpointer and self.checkpointer.get_tuple(config):
- self.update_state(config, input)
- result = super().invoke(None, config=config, *args, **kwargs)
- else:
- result = super().invoke(input, config=config, *args, **kwargs)
  try:
- if not result.get(PRINTER_NODE_RS):
- output = next((msg.content for msg in reversed(result['messages']) if not isinstance(msg, HumanMessage)),
- result['messages'][-1].content)
+ if self.checkpointer and self.checkpointer.get_tuple(config):
+ if config.pop("should_continue", False):
+ invoke_input = input
+ else:
+ self.update_state(config, input)
+ invoke_input = None
+ result = super().invoke(invoke_input, config=config, *args, **kwargs)
  else:
- # used for printer node output - it will be reset by next `reset` node
- output = result.get(PRINTER_NODE_RS)
- except:
- output = list(result.values())[-1]
+ result = super().invoke(input, config=config, *args, **kwargs)
+ except GraphRecursionError as e:
+ current_recursion_limit = config.get("recursion_limit", 0)
+ logger.warning("ToolExecutionLimitReached caught in LangGraphAgentRunnable: %s", e)
+ return self._handle_graph_recursion_error(
+ config=config,
+ thread_id=thread_id,
+ current_recursion_limit=current_recursion_limit,
+ )
+
+ try:
+ # Check if printer node output exists
+ printer_output = result.get(PRINTER_NODE_RS)
+ if printer_output == PRINTER_COMPLETED_STATE:
+ # Printer completed, extract last AI message
+ messages = result['messages']
+ output = next(
+ (msg.content for msg in reversed(messages)
+ if not isinstance(msg, HumanMessage)),
+ messages[-1].content
+ )
+ elif printer_output is not None:
+ # Printer node has output (interrupted state)
+ output = printer_output
+ else:
+ # No printer node, extract last AI message from messages
+ messages = result.get('messages', [])
+ output = next(
+ (msg.content for msg in reversed(messages)
+ if not isinstance(msg, HumanMessage)),
+ None
+ )
+ except Exception:
+ # Fallback: try to get last value or last message
+ output = list(result.values())[-1] if result else None
  config_state = self.get_state(config)
  is_execution_finished = not config_state.next
  if is_execution_finished:
  thread_id = None

+ final_output = f"Assistant run has been completed, but output is None.\nAdding last message if any: {messages[-1] if messages else []}" if is_execution_finished and output is None else output
+
  result_with_state = {
- "output": output,
+ "output": final_output,
  "thread_id": thread_id,
  "execution_finished": is_execution_finished
  }

  # Include all state values in the result
  if hasattr(config_state, 'values') and config_state.values:
+ # except of key = 'output' which is already included
+ for key, value in config_state.values.items():
+ if key != 'output':
+ result_with_state[key] = value
+
+ return result_with_state
+
+ def _handle_graph_recursion_error(
+ self,
+ config: RunnableConfig,
+ thread_id: str,
+ current_recursion_limit: int,
+ ) -> dict:
+ """Handle GraphRecursionError by returning a soft-boundary response."""
+ config_state = self.get_state(config)
+ is_execution_finished = False
+
+ friendly_output = (
+ f"Tool step limit {current_recursion_limit} reached for this run. You can continue by sending another "
+ "message or refining your request."
+ )
+
+ result_with_state: dict[str, Any] = {
+ "output": friendly_output,
+ "thread_id": thread_id,
+ "execution_finished": is_execution_finished,
+ "tool_execution_limit_reached": True,
+ }
+
+ if hasattr(config_state, "values") and config_state.values:
  for key, value in config_state.values.items():
- result_with_state[key] = value
+ if key != "output":
+ result_with_state[key] = value

  return result_with_state
@@ -131,7 +131,7 @@ def parse_type(type_str):


  def create_state(data: Optional[dict] = None):
- state_dict = {'input': str, 'router_output': str,
+ state_dict = {'input': str, 'messages': 'list[str]', 'router_output': str,
  ELITEA_RS: str, PRINTER_NODE_RS: str} # Always include router_output
  types_dict = {}
  if not data:
@@ -105,8 +105,7 @@ class PreloadedChatModel(BaseChatModel): # pylint: disable=R0903
  model_name: str = ""
  max_tokens: Optional[int] = 256
  temperature: Optional[float] = 0.9
- top_p: Optional[float] = 0.9
- top_k: Optional[int] = 20
+ reasoning_effort: Optional[str] = None
  token_limit: Optional[int] = 1024

  _local_streams: Any = PrivateAttr()
@@ -252,8 +251,7 @@ class PreloadedChatModel(BaseChatModel): # pylint: disable=R0903
  "return_full_text": False,
  "temperature": self.temperature,
  "do_sample": True,
- "top_k": self.top_k,
- "top_p": self.top_p,
+ "reasoning_effort": self.reasoning_effort
  }
  #
  try:
@@ -302,8 +300,6 @@ class PreloadedChatModel(BaseChatModel): # pylint: disable=R0903
  "return_full_text": False,
  "temperature": self.temperature,
  "do_sample": True,
- "top_k": self.top_k,
- "top_p": self.top_p,
  }
  #
  while True:
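Across these hunks the sampling knobs top_p/top_k are dropped in favor of reasoning_effort on PreloadedChatModel and the LLM settings payloads. A before/after sketch of the settings dicts; the "medium" value is an assumption, since the diff does not enumerate the accepted levels:

    # 0.3.457: nucleus/top-k sampling controls (now removed)
    old_settings = {"max_tokens": 256, "temperature": 0.9, "top_p": 0.9, "top_k": 20}

    # 0.3.486: reasoning_effort replaces them; "medium" is an assumed example value
    new_settings = {"max_tokens": 256, "temperature": 0.9, "reasoning_effort": "medium"}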
@@ -6,6 +6,7 @@ This module provides various toolkit implementations for LangGraph agents.
  from .application import ApplicationToolkit
  from .artifact import ArtifactToolkit
  from .datasource import DatasourcesToolkit
+ from .planning import PlanningToolkit
  from .prompt import PromptToolkit
  from .subgraph import SubgraphToolkit
  from .vectorstore import VectorStoreToolkit
@@ -16,6 +17,7 @@ __all__ = [
  "ApplicationToolkit",
  "ArtifactToolkit",
  "DatasourcesToolkit",
+ "PlanningToolkit",
  "PromptToolkit",
  "SubgraphToolkit",
  "VectorStoreToolkit",
@@ -28,7 +28,7 @@ class ApplicationToolkit(BaseToolkit):
  version_details = client.get_app_version_details(application_id, application_version_id)
  model_settings = {
  "max_tokens": version_details['llm_settings']['max_tokens'],
- "top_p": version_details['llm_settings']['top_p'],
+ "reasoning_effort": version_details['llm_settings'].get('reasoning_effort'),
  "temperature": version_details['llm_settings']['temperature'],
  }