alita-sdk 0.3.182__py3-none-any.whl → 0.3.183__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,7 +6,6 @@ from langchain.agents import (
     AgentExecutor, create_openai_tools_agent,
     create_json_chat_agent)
 from langgraph.store.base import BaseStore
-
 from .agents.xml_chat import create_xml_chat_agent
 from .langraph_agent import create_graph
 from langchain_core.messages import (
@@ -67,8 +66,10 @@ class Assistant:
         # Lazy import to avoid circular dependency
         from ..toolkits.tools import get_tools
         self.tools = get_tools(data['tools'], alita_client=alita, llm=self.client, memory_store=self.store)
-        if app_type == "pipeline":
+        if app_type in ["pipeline", "predict", "react"]:
             self.prompt = data['instructions']
+            if tools:
+                self.tools += tools
         else:
             self.tools += tools
             messages = [SystemMessage(content=data['instructions'])]
@@ -121,6 +122,8 @@ class Assistant:
             return self.getOpenAIToolsAgentExecutor()
         elif self.app_type == 'xml':
             return self.getXMLAgentExecutor()
+        elif self.app_type in ['predict', 'react']:
+            return self.getLangGraphReactAgent()
         else:
             self.tools = [EchoTool()] + self.tools
             return self.getAgentExecutor()
@@ -149,6 +152,125 @@ class Assistant:
         agent = create_openai_tools_agent(llm=self.client, tools=simple_tools, prompt=self.prompt)
         return self._agent_executor(agent)
 
+    def getLangGraphReactAgent(self):
+        """
+        Create a LangGraph react agent using a tool-calling agent pattern.
+        This creates a proper LangGraphAgentRunnable with modern tool support.
+        """
+        # Exclude compiled graph runnables from simple tool agents
+        simple_tools = [t for t in self.tools if isinstance(t, BaseTool)]
+
+        # Set up memory/checkpointer if available
+        checkpointer = None
+        if self.memory is not None:
+            checkpointer = self.memory
+        elif self.store is not None:
+            # Convert store to checkpointer if needed
+            from langgraph.checkpoint.memory import MemorySaver
+            checkpointer = MemorySaver()
+        else:
+            # Ensure we have a checkpointer for conversation persistence
+            from langgraph.checkpoint.memory import MemorySaver
+            checkpointer = MemorySaver()
+            logger.info("Using default MemorySaver for conversation persistence")
+
+        # Extract all messages from prompt/chat history for LangGraph
+        chat_history_messages = []
+        prompt_instructions = None
+
+        if hasattr(self.prompt, 'messages') and self.prompt.messages:
+            # Extract all messages from the prompt to use as chat history
+            for message in self.prompt.messages:
+                # Skip placeholders (MessagesPlaceholder instances) as they are not actual messages
+                if hasattr(message, 'variable_name'):  # MessagesPlaceholder has variable_name attribute
+                    continue
+                # Skip template messages (contains {{variable}} patterns)
+                if hasattr(message, 'content') and isinstance(message.content, str) and '{{' in message.content and '}}' in message.content:
+                    continue
+                # Include actual chat history messages
+                chat_history_messages.append(message)
+
+        # Only use prompt_instructions if explicitly specified (for predict app_type)
+        if self.app_type == "predict" and isinstance(self.prompt, str):
+            prompt_instructions = self.prompt
+
+        # Create a unified YAML schema with conditional tool binding
+        # Build the base node configuration
+        node_config = {
+            'id': 'agent',
+            'type': 'llm',
+            'prompt': {
+                'template': prompt_instructions or "You are a helpful assistant."
+            },
+            'input': ['messages'],
+            'output': ['messages'],
+            'transition': 'END'
+        }
+
+        # Add tool binding only if tools are present
+        if simple_tools:
+            tool_names = [tool.name for tool in simple_tools]
+            tool_names_yaml = str(tool_names).replace("'", '"')  # Convert to YAML-compatible format
+            node_config['tool_names'] = tool_names_yaml
+            logger.info("Binding tools: %s", tool_names)
+
+        # Properly setup the prompt for YAML
+        import yaml
+        escaped_prompt = prompt_instructions or "You are a helpful assistant."
+
+        # Create the schema as a dictionary first, then convert to YAML
+        state_messages_config = {'type': 'list'}
+
+        # Only set initial messages if there's actual conversation history (not just system prompts)
+        actual_conversation_messages = [
+            msg for msg in chat_history_messages
+            if not isinstance(msg, SystemMessage)  # Exclude system messages as they're handled by prompt template
+        ]
+
+        if actual_conversation_messages:
+            state_messages_config['value'] = actual_conversation_messages
+            logger.info(f"Setting initial conversation history with {len(actual_conversation_messages)} messages")
+
+        schema_dict = {
+            'name': 'react_agent',
+            'state': {
+                'messages': state_messages_config
+            },
+            'nodes': [{
+                'id': 'agent',
+                'type': 'llm',
+                'prompt': {
+                    'template': escaped_prompt
+                },
+                'input': ['messages'],
+                'output': ['messages'],
+                'transition': 'END'
+            }],
+            'entry_point': 'agent'
+        }
+
+        # Add tool-specific parameters only if tools exist
+        if simple_tools:
+            schema_dict['nodes'][0]['tool_names'] = tool_names
+
+        # Convert to YAML string
+        yaml_schema = yaml.dump(schema_dict, default_flow_style=False, allow_unicode=True)
+
+        # Use create_graph function to build the agent like other graph types
+        from .langraph_agent import create_graph
+
+        agent = create_graph(
+            client=self.client,
+            yaml_schema=yaml_schema,
+            tools=simple_tools,
+            memory=checkpointer,
+            store=self.store,
+            debug=False,
+            for_subgraph=False
+        )
+
+        return agent
+
     def pipeline(self):
         memory = self.memory
         #
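
For orientation, this is roughly the YAML that getLangGraphReactAgent hands to create_graph for a tool-less agent — a sketch obtained by running the schema_dict from the hunk above through yaml.dump; the instruction string is illustrative, not from the package:

    # Sketch: mirrors schema_dict above with a made-up instruction.
    import yaml

    schema_dict = {
        'name': 'react_agent',
        'state': {'messages': {'type': 'list'}},
        'nodes': [{
            'id': 'agent',
            'type': 'llm',
            'prompt': {'template': 'Answer briefly.'},
            'input': ['messages'],
            'output': ['messages'],
            'transition': 'END',
        }],
        'entry_point': 'agent',
    }
    print(yaml.dump(schema_dict, default_flow_style=False, allow_unicode=True))
    # yaml.dump sorts keys: entry_point, name, nodes, state; when tools are
    # present, a tool_names list is added to the single 'agent' node.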
@@ -6,7 +6,7 @@ from typing import Dict
 import yaml
 import ast
 from langchain_core.callbacks import dispatch_custom_event
-from langchain_core.messages import HumanMessage
+from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, BaseMessage
 from langchain_core.runnables import Runnable
 from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool
@@ -498,14 +498,36 @@ def create_graph(
                     var: get_type_hints(state_class).get(var, str).__name__
                     for var in output_vars
                 }
+
+                # Check if tools should be bound to this LLM node
+                tool_names = node.get('tool_names', []) if isinstance(node.get('tool_names'), list) else []
+
+                # Filter tools if specific tool names are provided
+                available_tools = []
+
+                if tool_names:
+                    # Filter tools by name
+                    tool_dict = {tool.name: tool for tool in tools if isinstance(tool, BaseTool)}
+                    available_tools = [tool_dict[name] for name in tool_names if name in tool_dict]
+                    if len(available_tools) != len(tool_names):
+                        missing_tools = [name for name in tool_names if name not in tool_dict]
+                        logger.warning(f"Some tools not found for LLM node {node_id}: {missing_tools}")
+                else:
+                    # Use all available tools
+                    available_tools = [tool for tool in tools if isinstance(tool, BaseTool)]
+
                 lg_builder.add_node(node_id, LLMNode(
-                    client=client, prompt=node.get('prompt', {}),
-                    name=node['id'], return_type='dict',
+                    client=client,
+                    prompt=node.get('prompt', {}),
+                    name=node['id'],
+                    return_type='dict',
                     response_key=node.get('response_key', 'messages'),
                     structured_output_dict=output_vars_dict,
                     output_variables=output_vars,
                     input_variables=node.get('input', ['messages']),
-                    structured_output=node.get('structured_output', False)))
+                    structured_output=node.get('structured_output', False),
+                    available_tools=available_tools,
+                    tool_names=tool_names))
             elif node_type == 'router':
                 # Add a RouterNode as an independent node
                 lg_builder.add_node(node_id, RouterNode(
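
In schema terms, the new tool_names key on an llm node selects which of the tools passed to create_graph are bound to that node; with the key absent or not a list, every BaseTool is bound. A hedged sketch of such a node entry (the tool names are placeholders, not tools shipped with the package):

    # Hypothetical llm-node definition as create_graph reads it.
    llm_node = {
        'id': 'agent',
        'type': 'llm',
        'prompt': {'template': 'You are a helpful assistant.'},
        'input': ['messages'],
        'output': ['messages'],
        'transition': 'END',
        'tool_names': ['search_docs', 'create_ticket'],  # must match tool.name values
    }
    # Names missing from the supplied tools are skipped with a logged warning.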
@@ -604,6 +626,29 @@ def create_graph(
     return compiled.validate()
 
 
+def convert_dict_to_message(msg_dict):
+    """Convert a dictionary message to a LangChain message object."""
+    if isinstance(msg_dict, BaseMessage):
+        return msg_dict  # Already a LangChain message
+
+    if isinstance(msg_dict, dict):
+        role = msg_dict.get('role', 'user')
+        content = msg_dict.get('content', '')
+
+        if role == 'user':
+            return HumanMessage(content=content)
+        elif role == 'assistant':
+            return AIMessage(content=content)
+        elif role == 'system':
+            return SystemMessage(content=content)
+        else:
+            # Default to HumanMessage for unknown roles
+            return HumanMessage(content=content)
+
+    # If it's neither dict nor BaseMessage, convert to string and make HumanMessage
+    return HumanMessage(content=str(msg_dict))
+
+
 class LangGraphAgentRunnable(CompiledStateGraph):
     def __init__(self, *args, output_variables=None, **kwargs):
         super().__init__(*args, **kwargs)
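
convert_dict_to_message normalizes the three input shapes invoke() can receive; its mapping in brief:

    from langchain_core.messages import HumanMessage

    convert_dict_to_message({'role': 'user', 'content': 'hi'})       # -> HumanMessage('hi')
    convert_dict_to_message({'role': 'assistant', 'content': 'ok'})  # -> AIMessage('ok')
    convert_dict_to_message({'role': 'system', 'content': 'rules'})  # -> SystemMessage('rules')
    convert_dict_to_message(HumanMessage(content='hi'))              # -> returned unchanged
    convert_dict_to_message(42)                                      # -> HumanMessage('42')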
@@ -612,16 +657,28 @@ class LangGraphAgentRunnable(CompiledStateGraph):
     def invoke(self, input: Union[dict[str, Any], Any],
                config: Optional[RunnableConfig] = None,
                *args, **kwargs):
-
+        logger.info(f"Incomming Input: {input}")
         if not config.get("configurable", {}).get("thread_id"):
             config["configurable"] = {"thread_id": str(uuid4())}
         thread_id = config.get("configurable", {}).get("thread_id")
+        # Handle chat history and current input properly
         if input.get('chat_history') and not input.get('messages'):
-            input['messages'] = input.pop('chat_history')
+            # Convert chat history dict messages to LangChain message objects
+            chat_history = input.pop('chat_history')
+            input['messages'] = [convert_dict_to_message(msg) for msg in chat_history]
+
+        # Append current input to existing messages instead of overwriting
         if input.get('input'):
-            input['messages'] = [{"role": "user", "content": input.get('input')}]
+            current_message = HumanMessage(content=input.get('input'))
+            if input.get('messages'):
+                # Ensure existing messages are LangChain objects
+                input['messages'] = [convert_dict_to_message(msg) for msg in input['messages']]
+                # Append to existing messages
+                input['messages'].append(current_message)
+            else:
+                # No existing messages, create new list
+                input['messages'] = [current_message]
         logging.info(f"Input: {thread_id} - {input}")
-
         if self.checkpointer and self.checkpointer.get_tuple(config):
             self.update_state(config, input)
             result = super().invoke(None, config=config, *args, **kwargs)
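
Taken together, callers can now mix dict-style history with a plain input string; a hedged usage sketch (construction of the runnable elided, thread id illustrative):

    # Sketch: 'agent' is a LangGraphAgentRunnable built elsewhere.
    result = agent.invoke(
        {
            "chat_history": [
                {"role": "user", "content": "What is alita-sdk?"},
                {"role": "assistant", "content": "An SDK for building Alita agents."},
            ],
            "input": "Does it support react agents now?",
        },
        config={"configurable": {"thread_id": "demo-thread-1"}},
    )
    # chat_history is converted to LangChain messages and the new input is
    # appended as a HumanMessage instead of overwriting the history.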
@@ -35,7 +35,7 @@ from langchain_core.messages import (AIMessageChunk, BaseMessage, HumanMessage,
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
 from langchain_core.runnables import run_in_executor
 from langchain_community.chat_models.openai import generate_from_stream, _convert_delta_to_message_chunk
-from ..clients import AlitaClient
+from ..clients.client import AlitaClient
 from pydantic import Field, model_validator, field_validator, ValidationInfo
 
 logger = logging.getLogger(__name__)
@@ -1,46 +1,148 @@
+import json
 import logging
 from traceback import format_exc
-from typing import Any, Optional, Dict, List
+from typing import Any, Optional, Dict, List, Union
 
-from langchain_core.messages import HumanMessage, BaseMessage
+from langchain_core.messages import HumanMessage, BaseMessage, SystemMessage, AIMessage
 from langchain_core.tools import BaseTool, ToolException
+from langchain_core.runnables import RunnableConfig
+from pydantic import Field
 
 from ..langchain.utils import _extract_json, create_pydantic_model, create_params
 
 logger = logging.getLogger(__name__)
 
 
-def create_llm_input(prompt: Dict[str, str], params: Dict[str, Any], kwargs: Dict[str, Any]) -> list[BaseMessage]:
-    logger.info(f"Creating LLM input with prompt: {prompt}, params: {params}, kwargs: {kwargs}")
-    if prompt.get('type') == 'fstring' and params:
+def create_llm_input_with_messages(
+        prompt: Dict[str, str],
+        messages: List[BaseMessage],
+        params: Dict[str, Any]
+) -> List[BaseMessage]:
+    """
+    Create LLM input by combining system prompt with chat history messages.
+
+    Args:
+        prompt: The prompt configuration with template
+        messages: List of chat history messages
+        params: Additional parameters for prompt formatting
+
+    Returns:
+        List of messages to send to LLM
+    """
+    logger.info(f"Creating LLM input with messages: {len(messages)} messages, params: {params}")
+
+    # Build the input messages
+    input_messages = []
+
+    # Add system message from prompt if available
+    if prompt and prompt.get('template'):
         try:
-            return [HumanMessage(content=prompt['value'].format(**params))]
+            system_content = prompt['template'].format(**params) if params else prompt['template']
+            input_messages.append(SystemMessage(content=system_content))
         except KeyError as e:
-            error_msg = f"KeyError in input formatting - make sure you have added all required state variables as input to the node: {e}"
+            error_msg = f"KeyError in prompt formatting: {e}. Available params: {list(params.keys())}"
             logger.error(error_msg)
             raise ToolException(error_msg)
-    else:
-        return kwargs.get("messages") + [HumanMessage(prompt['value'])]
+
+    # Add the chat history messages
+    if messages:
+        input_messages.extend(messages)
+
+    return input_messages
 
 
 class LLMNode(BaseTool):
-    name: str = 'LLMNode'
-    prompt: Dict[str, str]
-    description: str = 'This is tool node for LLM'
-    client: Any = None
-    return_type: str = "str"
-    response_key: str = "messages"
-    structured_output_dict: Optional[dict[str, str]] = None
-    output_variables: Optional[List[str]] = None
-    input_variables: Optional[List[str]] = None
-    structured_output: Optional[bool] = False
+    """Enhanced LLM node with chat history and tool binding support"""
+
+    # Override BaseTool required fields
+    name: str = Field(default='LLMNode', description='Name of the LLM node')
+    description: str = Field(default='This is tool node for LLM with chat history and tool support', description='Description of the LLM node')
+
+    # LLM-specific fields
+    prompt: Dict[str, str] = Field(default_factory=dict, description='Prompt configuration')
+    client: Any = Field(default=None, description='LLM client instance')
+    return_type: str = Field(default="str", description='Return type')
+    response_key: str = Field(default="messages", description='Response key')
+    structured_output_dict: Optional[dict[str, str]] = Field(default=None, description='Structured output dictionary')
+    output_variables: Optional[List[str]] = Field(default=None, description='Output variables')
+    input_variables: Optional[List[str]] = Field(default=None, description='Input variables')
+    structured_output: Optional[bool] = Field(default=False, description='Whether to use structured output')
+    available_tools: Optional[List[BaseTool]] = Field(default=None, description='Available tools for binding')
+    tool_names: Optional[List[str]] = Field(default=None, description='Specific tool names to filter')
 
-    def _run(self, *args, **kwargs):
-        params = create_params(self.input_variables, kwargs)
-        logger.info(f"LLM Node params: {params}")
-        llm_input = create_llm_input(self.prompt, params, kwargs)
+    def get_filtered_tools(self) -> List[BaseTool]:
+        """
+        Filter available tools based on tool_names list.
+
+        Returns:
+            List of filtered tools
+        """
+        if not self.available_tools:
+            return []
+
+        if not self.tool_names:
+            # If no specific tool names provided, return all available tools
+            return self.available_tools
+
+        # Filter tools by name
+        filtered_tools = []
+        available_tool_names = {tool.name: tool for tool in self.available_tools}
+
+        for tool_name in self.tool_names:
+            if tool_name in available_tool_names:
+                filtered_tools.append(available_tool_names[tool_name])
+                logger.debug(f"Added tool '{tool_name}' to LLM node")
+            else:
+                logger.warning(f"Tool '{tool_name}' not found in available tools: {list(available_tool_names.keys())}")
+
+        return filtered_tools
+
+    def invoke(
+            self,
+            state: Union[str, dict],
+            config: Optional[RunnableConfig] = None,
+            **kwargs: Any,
+    ) -> dict:
+        """
+        Invoke the LLM node with proper message handling and tool binding.
+
+        Args:
+            state: The current state containing messages and other variables
+            config: Optional runnable config
+            **kwargs: Additional keyword arguments
+
+        Returns:
+            Updated state with LLM response
+        """
+        # Extract messages from state
+
+        messages = state.get("messages", []) if isinstance(state, dict) else []
+        logger.info(f"Invoking LLMNode with {len(messages)} messages")
+        logger.info("Messages: %s", messages)
+        # Create parameters for prompt formatting from state
+        params = {}
+        if isinstance(state, dict):
+            for var in self.input_variables or []:
+                if var != "messages" and var in state:
+                    params[var] = state[var]
+
+        # Create LLM input with proper message handling
+        llm_input = create_llm_input_with_messages(self.prompt, messages, params)
+
+        # Get the LLM client, potentially with tools bound
+        llm_client = self.client
+
+        if len(self.tool_names or []) > 0:
+            filtered_tools = self.get_filtered_tools()
+            if filtered_tools:
+                logger.info(f"Binding {len(filtered_tools)} tools to LLM: {[t.name for t in filtered_tools]}")
+                llm_client = self.client.bind_tools(filtered_tools)
+            else:
+                logger.warning("No tools to bind to LLM")
+
         try:
-            if self.structured_output and len(self.output_variables) > 0:
+            if self.structured_output and self.output_variables:
+                # Handle structured output
                 struct_params = {
                     key: {
                         "type": 'list[str]' if 'list' in value else value,
@@ -48,27 +150,144 @@ class LLMNode(BaseTool):
                     }
                     for key, value in (self.structured_output_dict or {}).items()
                 }
-                stuct_model = create_pydantic_model(f"LLMOutput", struct_params)
-                llm = self.client.with_structured_output(stuct_model)
-                completion = llm.invoke(llm_input)
+                struct_model = create_pydantic_model(f"LLMOutput", struct_params)
+                llm = llm_client.with_structured_output(struct_model)
+                completion = llm.invoke(llm_input, config=config)
                 result = completion.model_dump()
+
+                # Ensure messages are properly formatted
                 if result.get('messages') and isinstance(result['messages'], list):
                     result['messages'] = [{'role': 'assistant', 'content': '\n'.join(result['messages'])}]
+
                 return result
             else:
-                completion = self.client.invoke(llm_input)
-                result = completion.content.strip()
-                response = _extract_json(result) or {}
-                response_data = {key: response[key] for key in response if key in self.output_variables}
-                if not response_data.get('messages'):
-                    response_data['messages'] = [
-                        {"role": "assistant", "content": response_data.get(self.response_key) or result}]
-                return response_data
-        except ValueError:
-            if self.output_variables:
-                return {self.output_variables[0]: result, "messages": [{"role": "assistant", "content": result}]}
-            else:
-                return {"messages": [{"role": "assistant", "content": result}]}
+                # Handle regular completion
+                completion = llm_client.invoke(llm_input, config=config)
+                logger.info(f"Initial completion: {completion}")
+                # Handle both tool-calling and regular responses
+                if hasattr(completion, 'tool_calls') and completion.tool_calls:
+                    # Handle iterative tool-calling and execution
+                    new_messages = messages + [completion]
+                    max_iterations = 15
+                    iteration = 0
+
+                    # Continue executing tools until no more tool calls or max iterations reached
+                    current_completion = completion
+                    while (hasattr(current_completion, 'tool_calls') and
+                           current_completion.tool_calls and
+                           iteration < max_iterations):
+
+                        iteration += 1
+                        logger.info(f"Tool execution iteration {iteration}/{max_iterations}")
+
+                        # Execute each tool call in the current completion
+                        tool_calls = current_completion.tool_calls if hasattr(current_completion.tool_calls, '__iter__') else []
+
+                        for tool_call in tool_calls:
+                            tool_name = tool_call.get('name', '') if isinstance(tool_call, dict) else getattr(tool_call, 'name', '')
+                            tool_args = tool_call.get('args', {}) if isinstance(tool_call, dict) else getattr(tool_call, 'args', {})
+                            tool_call_id = tool_call.get('id', '') if isinstance(tool_call, dict) else getattr(tool_call, 'id', '')
+
+                            # Find the tool in filtered tools
+                            filtered_tools = self.get_filtered_tools()
+                            tool_to_execute = None
+                            for tool in filtered_tools:
+                                if tool.name == tool_name:
+                                    tool_to_execute = tool
+                                    break
+
+                            if tool_to_execute:
+                                try:
+                                    logger.info(f"Executing tool '{tool_name}' with args: {tool_args}")
+                                    tool_result = tool_to_execute.invoke(tool_args)
+
+                                    # Create tool message with result
+                                    from langchain_core.messages import ToolMessage
+                                    tool_message = ToolMessage(
+                                        content=str(tool_result),
+                                        tool_call_id=tool_call_id
+                                    )
+                                    new_messages.append(tool_message)
+
+                                except Exception as e:
+                                    logger.error(f"Error executing tool '{tool_name}': {e}")
+                                    # Create error tool message
+                                    from langchain_core.messages import ToolMessage
+                                    tool_message = ToolMessage(
+                                        content=f"Error executing {tool_name}: {str(e)}",
+                                        tool_call_id=tool_call_id
+                                    )
+                                    new_messages.append(tool_message)
+                            else:
+                                logger.warning(f"Tool '{tool_name}' not found in available tools")
+                                # Create error tool message for missing tool
+                                from langchain_core.messages import ToolMessage
+                                tool_message = ToolMessage(
+                                    content=f"Tool '{tool_name}' not available",
+                                    tool_call_id=tool_call_id
+                                )
+                                new_messages.append(tool_message)
+
+                        # Call LLM again with tool results to get next response
+                        try:
+                            current_completion = llm_client.invoke(new_messages, config=config)
+                            new_messages.append(current_completion)
+
+                            # Check if we still have tool calls
+                            if hasattr(current_completion, 'tool_calls') and current_completion.tool_calls:
+                                logger.info(f"LLM requested {len(current_completion.tool_calls)} more tool calls")
+                            else:
+                                logger.info("LLM completed without requesting more tools")
+                                break
+
+                        except Exception as e:
+                            logger.error(f"Error in LLM call during iteration {iteration}: {e}")
+                            # Add error message and break the loop
+                            error_msg = f"Error processing tool results in iteration {iteration}: {str(e)}"
+                            new_messages.append(AIMessage(content=error_msg))
+                            break
+
+                    # Log completion status
+                    if iteration >= max_iterations:
+                        logger.warning(f"Reached maximum iterations ({max_iterations}) for tool execution")
+                        # Add a warning message to the chat
+                        warning_msg = f"Maximum tool execution iterations ({max_iterations}) reached. Stopping tool execution."
+                        new_messages.append(AIMessage(content=warning_msg))
+                    else:
+                        logger.info(f"Tool execution completed after {iteration} iterations")
+
+                    return {"messages": new_messages}
+                else:
+                    # Regular text response
+                    content = completion.content.strip() if hasattr(completion, 'content') else str(completion)
+
+                    # Try to extract JSON if output variables are specified (but exclude 'messages' which is handled separately)
+                    json_output_vars = [var for var in (self.output_variables or []) if var != 'messages']
+                    if json_output_vars:
+                        try:
+                            response = _extract_json(content) or {}
+                            response_data = {key: response.get(key) for key in json_output_vars if key in response}
+
+                            # Always add the messages to the response
+                            new_messages = messages + [AIMessage(content=content)]
+                            response_data['messages'] = new_messages
+
+                            return response_data
+                        except (ValueError, json.JSONDecodeError) as e:
+                            # LLM returned non-JSON content, treat as plain text
+                            logger.warning(f"Expected JSON output but got plain text. Output variables specified: {json_output_vars}. Error: {e}")
+                            # Fall through to plain text handling
+
+                    # Simple text response (either no output variables or JSON parsing failed)
+                    new_messages = messages + [AIMessage(content=content)]
+                    return {"messages": new_messages}
+
         except Exception as e:
             logger.error(f"Error in LLM Node: {format_exc()}")
-            return {"messages": [{"role": "assistant", "content": f"Error: {e}"}]}
+            error_msg = f"Error: {e}"
+            new_messages = messages + [AIMessage(content=error_msg)]
+            return {"messages": new_messages}
+
+    def _run(self, *args, **kwargs):
+        # Legacy support for old interface
+        return self.invoke(kwargs, **kwargs)
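
A hedged sketch of driving the new LLMNode directly; llm stands for any LangChain chat model exposing invoke/bind_tools, and the adder tool is a stand-in, not part of alita-sdk:

    from langchain_core.messages import HumanMessage
    from langchain_core.tools import tool

    @tool
    def adder(a: int, b: int) -> int:
        """Add two integers."""
        return a + b

    node = LLMNode(
        client=llm,  # assumed: a chat model with .invoke/.bind_tools
        prompt={"template": "You are a calculator."},
        input_variables=["messages"],
        output_variables=["messages"],
        available_tools=[adder],
        tool_names=["adder"],
    )
    result = node.invoke({"messages": [HumanMessage(content="What is 2 + 3?")]})
    # result["messages"] ends with the final AI answer; intermediate tool calls
    # are executed in the loop above, capped at max_iterations = 15.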
@@ -35,6 +35,21 @@ class McpServerTool(BaseTool):
             field_type = float
         elif field_type == 'boolean':
             field_type = bool
+        elif field_type == 'object':  # Dict[str, Any]
+            nested_model = McpServerTool.create_pydantic_model_from_schema(field_info)
+            field_type = nested_model
+        elif field_type == 'array':
+            item_schema = field_info['items']
+            item_type = McpServerTool.create_pydantic_model_from_schema(item_schema) if item_schema['type'] == 'object' else (
+                str if item_schema['type'] == 'string' else
+                int if item_schema['type'] == 'integer' else
+                float if item_schema['type'] == 'number' else
+                bool if item_schema['type'] == 'boolean' else
+                None
+            )
+            if item_type is None:
+                raise ValueError(f"Unsupported array item type: {item_schema['type']}")
+            field_type = list[item_type]
         else:
             raise ValueError(f"Unsupported field type: {field_type}")
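
With the new object and array branches, nested schema properties become nested pydantic models and typed lists; an illustrative JSON-schema fragment this code can now translate (field names invented):

    # Hypothetical MCP tool input schema.
    schema = {
        "type": "object",
        "properties": {
            "labels": {"type": "array", "items": {"type": "string"}},  # -> list[str]
            "owner": {                                                 # -> nested model
                "type": "object",
                "properties": {"name": {"type": "string"}},
            },
        },
    }
    # Array items of type object recurse into create_pydantic_model_from_schema;
    # any other unsupported item type raises ValueError.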