alita-sdk 0.3.351__py3-none-any.whl → 0.3.499__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +155 -0
- alita_sdk/cli/agent_loader.py +215 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3601 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1256 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +64 -8
- alita_sdk/community/inventory/__init__.py +224 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/confluence.py +96 -1
- alita_sdk/configurations/gitlab.py +79 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +93 -0
- alita_sdk/configurations/zephyr_enterprise.py +93 -0
- alita_sdk/configurations/zephyr_essential.py +75 -0
- alita_sdk/runtime/clients/artifact.py +1 -1
- alita_sdk/runtime/clients/client.py +214 -42
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +373 -0
- alita_sdk/runtime/langchain/assistant.py +118 -30
- alita_sdk/runtime/langchain/constants.py +8 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +41 -12
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -1
- alita_sdk/runtime/langchain/document_loaders/constants.py +116 -99
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +2 -2
- alita_sdk/runtime/langchain/langraph_agent.py +307 -71
- alita_sdk/runtime/langchain/utils.py +48 -8
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/toolkits/__init__.py +26 -0
- alita_sdk/runtime/toolkits/application.py +9 -2
- alita_sdk/runtime/toolkits/artifact.py +18 -6
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +780 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/tools.py +205 -55
- alita_sdk/runtime/toolkits/vectorstore.py +9 -4
- alita_sdk/runtime/tools/__init__.py +11 -3
- alita_sdk/runtime/tools/application.py +7 -0
- alita_sdk/runtime/tools/artifact.py +225 -12
- alita_sdk/runtime/tools/function.py +95 -5
- alita_sdk/runtime/tools/graph.py +10 -4
- alita_sdk/runtime/tools/image_generation.py +212 -0
- alita_sdk/runtime/tools/llm.py +494 -102
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +4 -4
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -1
- alita_sdk/runtime/tools/sandbox.py +180 -79
- alita_sdk/runtime/tools/vectorstore.py +22 -21
- alita_sdk/runtime/tools/vectorstore_base.py +125 -52
- alita_sdk/runtime/utils/AlitaCallback.py +106 -20
- alita_sdk/runtime/utils/mcp_client.py +465 -0
- alita_sdk/runtime/utils/mcp_oauth.py +244 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/streamlit.py +40 -13
- alita_sdk/runtime/utils/toolkit_utils.py +28 -9
- alita_sdk/runtime/utils/utils.py +12 -0
- alita_sdk/tools/__init__.py +77 -33
- alita_sdk/tools/ado/repos/__init__.py +7 -6
- alita_sdk/tools/ado/repos/repos_wrapper.py +11 -11
- alita_sdk/tools/ado/test_plan/__init__.py +7 -7
- alita_sdk/tools/ado/wiki/__init__.py +7 -11
- alita_sdk/tools/ado/wiki/ado_wrapper.py +89 -15
- alita_sdk/tools/ado/work_item/__init__.py +7 -11
- alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
- alita_sdk/tools/advanced_jira_mining/__init__.py +8 -7
- alita_sdk/tools/aws/delta_lake/__init__.py +11 -9
- alita_sdk/tools/azure_ai/search/__init__.py +7 -6
- alita_sdk/tools/base_indexer_toolkit.py +345 -70
- alita_sdk/tools/bitbucket/__init__.py +9 -8
- alita_sdk/tools/bitbucket/api_wrapper.py +50 -6
- alita_sdk/tools/browser/__init__.py +4 -4
- alita_sdk/tools/carrier/__init__.py +4 -6
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +7 -6
- alita_sdk/tools/cloud/azure/__init__.py +7 -6
- alita_sdk/tools/cloud/gcp/__init__.py +7 -6
- alita_sdk/tools/cloud/k8s/__init__.py +7 -6
- alita_sdk/tools/code/linter/__init__.py +7 -7
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +8 -7
- alita_sdk/tools/code_indexer_toolkit.py +199 -0
- alita_sdk/tools/confluence/__init__.py +9 -8
- alita_sdk/tools/confluence/api_wrapper.py +171 -75
- alita_sdk/tools/confluence/loader.py +10 -0
- alita_sdk/tools/custom_open_api/__init__.py +9 -4
- alita_sdk/tools/elastic/__init__.py +8 -7
- alita_sdk/tools/elitea_base.py +492 -52
- alita_sdk/tools/figma/__init__.py +7 -7
- alita_sdk/tools/figma/api_wrapper.py +2 -1
- alita_sdk/tools/github/__init__.py +9 -9
- alita_sdk/tools/github/api_wrapper.py +9 -26
- alita_sdk/tools/github/github_client.py +62 -2
- alita_sdk/tools/gitlab/__init__.py +8 -8
- alita_sdk/tools/gitlab/api_wrapper.py +135 -33
- alita_sdk/tools/gitlab_org/__init__.py +7 -8
- alita_sdk/tools/google/bigquery/__init__.py +11 -12
- alita_sdk/tools/google_places/__init__.py +8 -7
- alita_sdk/tools/jira/__init__.py +9 -7
- alita_sdk/tools/jira/api_wrapper.py +100 -52
- alita_sdk/tools/keycloak/__init__.py +8 -7
- alita_sdk/tools/localgit/local_git.py +56 -54
- alita_sdk/tools/memory/__init__.py +1 -1
- alita_sdk/tools/non_code_indexer_toolkit.py +3 -2
- alita_sdk/tools/ocr/__init__.py +8 -7
- alita_sdk/tools/openapi/__init__.py +10 -1
- alita_sdk/tools/pandas/__init__.py +8 -7
- alita_sdk/tools/postman/__init__.py +7 -8
- alita_sdk/tools/postman/api_wrapper.py +19 -8
- alita_sdk/tools/postman/postman_analysis.py +8 -1
- alita_sdk/tools/pptx/__init__.py +8 -9
- alita_sdk/tools/qtest/__init__.py +16 -11
- alita_sdk/tools/qtest/api_wrapper.py +1784 -88
- alita_sdk/tools/rally/__init__.py +7 -8
- alita_sdk/tools/report_portal/__init__.py +9 -7
- alita_sdk/tools/salesforce/__init__.py +7 -7
- alita_sdk/tools/servicenow/__init__.py +10 -10
- alita_sdk/tools/sharepoint/__init__.py +7 -6
- alita_sdk/tools/sharepoint/api_wrapper.py +127 -36
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +7 -6
- alita_sdk/tools/sql/__init__.py +8 -7
- alita_sdk/tools/sql/api_wrapper.py +71 -23
- alita_sdk/tools/testio/__init__.py +7 -6
- alita_sdk/tools/testrail/__init__.py +8 -9
- alita_sdk/tools/utils/__init__.py +26 -4
- alita_sdk/tools/utils/content_parser.py +88 -60
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +76 -26
- alita_sdk/tools/xray/__init__.py +9 -7
- alita_sdk/tools/zephyr/__init__.py +7 -6
- alita_sdk/tools/zephyr_enterprise/__init__.py +8 -6
- alita_sdk/tools/zephyr_essential/__init__.py +7 -6
- alita_sdk/tools/zephyr_essential/api_wrapper.py +12 -13
- alita_sdk/tools/zephyr_scale/__init__.py +7 -6
- alita_sdk/tools/zephyr_squad/__init__.py +7 -6
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/METADATA +147 -2
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/RECORD +206 -130
- alita_sdk-0.3.499.dist-info/entry_points.txt +2 -0
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/tools/llm.py (CHANGED)

@@ -1,3 +1,4 @@
+import asyncio
 import logging
 from traceback import format_exc
 from typing import Any, Optional, List, Union
@@ -7,10 +8,12 @@ from langchain_core.runnables import RunnableConfig
 from langchain_core.tools import BaseTool, ToolException
 from pydantic import Field
 
+from ..langchain.constants import ELITEA_RS
 from ..langchain.utils import create_pydantic_model, propagate_the_input_mapping
 
 logger = logging.getLogger(__name__)
 
+
 class LLMNode(BaseTool):
     """Enhanced LLM node with chat history and tool binding support"""
 
@@ -30,6 +33,8 @@ class LLMNode(BaseTool):
     structured_output: Optional[bool] = Field(default=False, description='Whether to use structured output')
     available_tools: Optional[List[BaseTool]] = Field(default=None, description='Available tools for binding')
     tool_names: Optional[List[str]] = Field(default=None, description='Specific tool names to filter')
+    steps_limit: Optional[int] = Field(default=25, description='Maximum steps for tool execution')
+    tool_execution_timeout: Optional[int] = Field(default=900, description='Timeout (seconds) for tool execution. Default is 15 minutes.')
 
     def get_filtered_tools(self) -> List[BaseTool]:
         """
@@ -58,6 +63,47 @@
 
         return filtered_tools
 
+    def _get_tool_truncation_suggestions(self, tool_name: Optional[str]) -> str:
+        """
+        Get context-specific suggestions for how to reduce output from a tool.
+
+        First checks if the tool itself provides truncation suggestions via
+        `truncation_suggestions` attribute or `get_truncation_suggestions()` method.
+        Falls back to generic suggestions if the tool doesn't provide any.
+
+        Args:
+            tool_name: Name of the tool that caused the context overflow
+
+        Returns:
+            Formatted string with numbered suggestions for the specific tool
+        """
+        suggestions = None
+
+        # Try to get suggestions from the tool itself
+        if tool_name:
+            filtered_tools = self.get_filtered_tools()
+            for tool in filtered_tools:
+                if tool.name == tool_name:
+                    # Check for truncation_suggestions attribute
+                    if hasattr(tool, 'truncation_suggestions') and tool.truncation_suggestions:
+                        suggestions = tool.truncation_suggestions
+                        break
+                    # Check for get_truncation_suggestions method
+                    elif hasattr(tool, 'get_truncation_suggestions') and callable(tool.get_truncation_suggestions):
+                        suggestions = tool.get_truncation_suggestions()
+                        break
+
+        # Fall back to generic suggestions if tool doesn't provide any
+        if not suggestions:
+            suggestions = [
+                "Check if the tool has parameters to limit output size (e.g., max_items, max_results, max_depth)",
+                "Target a more specific path or query instead of broad searches",
+                "Break the operation into smaller, focused requests",
+            ]
+
+        # Format as numbered list
+        return "\n".join(f"{i+1}. {s}" for i, s in enumerate(suggestions))
+
     def invoke(
         self,
         state: Union[str, dict],
@@ -84,18 +130,24 @@
         # or standalone LLM node for chat (with messages only)
         if 'system' in func_args.keys():
             # Flow for LLM node with prompt/task from pipeline
-            if
+            if func_args.get('system') is None or func_args.get('task') is None:
+                raise ToolException(f"LLMNode requires 'system' and 'task' parameters in input mapping. "
+                                    f"Actual params: {func_args}")
                 raise ToolException(f"LLMNode requires 'system' and 'task' parameters in input mapping. "
                                     f"Actual params: {func_args}")
-
-            messages.
+            # cast to str in case user passes variable different from str
+            messages = [SystemMessage(content=str(func_args.get('system'))), *func_args.get('chat_history', []), HumanMessage(content=str(func_args.get('task')))]
+            # Remove pre-last item if last two messages are same type and content
+            if len(messages) >= 2 and type(messages[-1]) == type(messages[-2]) and messages[-1].content == messages[
+                -2].content:
+                messages.pop(-2)
         else:
             # Flow for chat-based LLM node w/o prompt/task from pipeline but with messages in state
             # verify messages structure
             messages = state.get("messages", []) if isinstance(state, dict) else []
             if messages:
                 # the last message has to be HumanMessage
-                if not isinstance(messages[
+                if not isinstance(messages[-1], HumanMessage):
                     raise ToolException("LLMNode requires the last message to be a HumanMessage")
             else:
                 raise ToolException("LLMNode requires 'messages' in state for chat-based interaction")
@@ -121,14 +173,27 @@
                 }
                 for key, value in (self.structured_output_dict or {}).items()
             }
+            # Add default output field for proper response to user
+            struct_params['elitea_response'] = {'description': 'final output to user', 'type': 'str'}
             struct_model = create_pydantic_model(f"LLMOutput", struct_params)
-
-
-
+            completion = llm_client.invoke(messages, config=config)
+            if hasattr(completion, 'tool_calls') and completion.tool_calls:
+                new_messages, _ = self._run_async_in_sync_context(
+                    self.__perform_tool_calling(completion, messages, llm_client, config)
+                )
+                llm = self.__get_struct_output_model(llm_client, struct_model)
+                completion = llm.invoke(new_messages, config=config)
+                result = completion.model_dump()
+            else:
+                llm = self.__get_struct_output_model(llm_client, struct_model)
+                completion = llm.invoke(messages, config=config)
+                result = completion.model_dump()
 
             # Ensure messages are properly formatted
             if result.get('messages') and isinstance(result['messages'], list):
                 result['messages'] = [{'role': 'assistant', 'content': '\n'.join(result['messages'])}]
+            else:
+                result['messages'] = messages + [AIMessage(content=result.get(ELITEA_RS, ''))]
 
             return result
         else:
@@ -138,102 +203,17 @@
             # Handle both tool-calling and regular responses
             if hasattr(completion, 'tool_calls') and completion.tool_calls:
                 # Handle iterative tool-calling and execution
-                new_messages =
-
-
-
-                # Continue executing tools until no more tool calls or max iterations reached
-                current_completion = completion
-                while (hasattr(current_completion, 'tool_calls') and
-                       current_completion.tool_calls and
-                       iteration < max_iterations):
-
-                    iteration += 1
-                    logger.info(f"Tool execution iteration {iteration}/{max_iterations}")
-
-                    # Execute each tool call in the current completion
-                    tool_calls = current_completion.tool_calls if hasattr(current_completion.tool_calls,
-                                                                          '__iter__') else []
-
-                    for tool_call in tool_calls:
-                        tool_name = tool_call.get('name', '') if isinstance(tool_call, dict) else getattr(tool_call,
-                                                                                                          'name',
-                                                                                                          '')
-                        tool_args = tool_call.get('args', {}) if isinstance(tool_call, dict) else getattr(tool_call,
-                                                                                                          'args',
-                                                                                                          {})
-                        tool_call_id = tool_call.get('id', '') if isinstance(tool_call, dict) else getattr(
-                            tool_call, 'id', '')
-
-                        # Find the tool in filtered tools
-                        filtered_tools = self.get_filtered_tools()
-                        tool_to_execute = None
-                        for tool in filtered_tools:
-                            if tool.name == tool_name:
-                                tool_to_execute = tool
-                                break
+                new_messages, current_completion = self._run_async_in_sync_context(
+                    self.__perform_tool_calling(completion, messages, llm_client, config)
+                )
 
-
-
-
-
-
-                                # Create tool message with result
-                                from langchain_core.messages import ToolMessage
-                                tool_message = ToolMessage(
-                                    content=str(tool_result),
-                                    tool_call_id=tool_call_id
-                                )
-                                new_messages.append(tool_message)
-
-                            except Exception as e:
-                                logger.error(f"Error executing tool '{tool_name}': {e}")
-                                # Create error tool message
-                                from langchain_core.messages import ToolMessage
-                                tool_message = ToolMessage(
-                                    content=f"Error executing {tool_name}: {str(e)}",
-                                    tool_call_id=tool_call_id
-                                )
-                                new_messages.append(tool_message)
-                        else:
-                            logger.warning(f"Tool '{tool_name}' not found in available tools")
-                            # Create error tool message for missing tool
-                            from langchain_core.messages import ToolMessage
-                            tool_message = ToolMessage(
-                                content=f"Tool '{tool_name}' not available",
-                                tool_call_id=tool_call_id
-                            )
-                            new_messages.append(tool_message)
-
-                    # Call LLM again with tool results to get next response
-                    try:
-                        current_completion = llm_client.invoke(new_messages, config=config)
-                        new_messages.append(current_completion)
-
-                        # Check if we still have tool calls
-                        if hasattr(current_completion, 'tool_calls') and current_completion.tool_calls:
-                            logger.info(f"LLM requested {len(current_completion.tool_calls)} more tool calls")
-                        else:
-                            logger.info("LLM completed without requesting more tools")
-                            break
-
-                    except Exception as e:
-                        logger.error(f"Error in LLM call during iteration {iteration}: {e}")
-                        # Add error message and break the loop
-                        error_msg = f"Error processing tool results in iteration {iteration}: {str(e)}"
-                        new_messages.append(AIMessage(content=error_msg))
-                        break
-
-                # Log completion status
-                if iteration >= max_iterations:
-                    logger.warning(f"Reached maximum iterations ({max_iterations}) for tool execution")
-                    # Add a warning message to the chat
-                    warning_msg = f"Maximum tool execution iterations ({max_iterations}) reached. Stopping tool execution."
-                    new_messages.append(AIMessage(content=warning_msg))
-                else:
-                    logger.info(f"Tool execution completed after {iteration} iterations")
+                output_msgs = {"messages": new_messages}
+                if self.output_variables:
+                    if self.output_variables[0] == 'messages':
+                        return output_msgs
+                    output_msgs[self.output_variables[0]] = current_completion.content if current_completion else None
 
-                return
+                return output_msgs
             else:
                 # Regular text response
                 content = completion.content.strip() if hasattr(completion, 'content') else str(completion)
@@ -259,4 +239,416 @@
 
     def _run(self, *args, **kwargs):
         # Legacy support for old interface
-        return self.invoke(kwargs, **kwargs)
+        return self.invoke(kwargs, **kwargs)
+
+    def _run_async_in_sync_context(self, coro):
+        """Run async coroutine from sync context.
+
+        For MCP tools with persistent sessions, we reuse the same event loop
+        that was used to create the MCP client and sessions (set by CLI).
+
+        When called from within a running event loop (e.g., nested LLM nodes),
+        we need to handle this carefully to avoid "event loop already running" errors.
+
+        This method handles three scenarios:
+        1. Called from async context (event loop running) - creates new thread with new loop
+        2. Called from sync context with persistent loop - reuses persistent loop
+        3. Called from sync context without loop - creates new persistent loop
+        """
+        import threading
+
+        # Check if there's a running loop
+        try:
+            running_loop = asyncio.get_running_loop()
+            loop_is_running = True
+            logger.debug(f"Detected running event loop (id: {id(running_loop)}), executing tool calls in separate thread")
+        except RuntimeError:
+            loop_is_running = False
+
+        # Scenario 1: Loop is currently running - MUST use thread
+        if loop_is_running:
+            result_container = []
+            exception_container = []
+
+            # Try to capture Streamlit context from current thread for propagation
+            streamlit_ctx = None
+            try:
+                from streamlit.runtime.scriptrunner import get_script_run_ctx, add_script_run_ctx
+                streamlit_ctx = get_script_run_ctx()
+                if streamlit_ctx:
+                    logger.debug("Captured Streamlit context for propagation to worker thread")
+            except (ImportError, Exception) as e:
+                logger.debug(f"Streamlit context not available or failed to capture: {e}")
+
+            def run_in_thread():
+                """Run coroutine in a new thread with its own event loop."""
+                new_loop = asyncio.new_event_loop()
+                asyncio.set_event_loop(new_loop)
+                try:
+                    result = new_loop.run_until_complete(coro)
+                    result_container.append(result)
+                except Exception as e:
+                    logger.debug(f"Exception in async thread: {e}")
+                    exception_container.append(e)
+                finally:
+                    new_loop.close()
+                    asyncio.set_event_loop(None)
+
+            thread = threading.Thread(target=run_in_thread, daemon=False)
+
+            # Propagate Streamlit context to the worker thread if available
+            if streamlit_ctx is not None:
+                try:
+                    add_script_run_ctx(thread, streamlit_ctx)
+                    logger.debug("Successfully propagated Streamlit context to worker thread")
+                except Exception as e:
+                    logger.warning(f"Failed to propagate Streamlit context to worker thread: {e}")
+
+            thread.start()
+            thread.join(timeout=self.tool_execution_timeout)  # 15 minute timeout for safety
+
+            if thread.is_alive():
+                logger.error("Async operation timed out after 5 minutes")
+                raise TimeoutError("Async operation in thread timed out")
+
+            # Re-raise exception if one occurred
+            if exception_container:
+                raise exception_container[0]
+
+            return result_container[0] if result_container else None
+
+        # Scenario 2 & 3: No loop running - use or create persistent loop
+        else:
+            # Get or create persistent loop
+            if not hasattr(self.__class__, '_persistent_loop') or \
+               self.__class__._persistent_loop is None or \
+               self.__class__._persistent_loop.is_closed():
+                self.__class__._persistent_loop = asyncio.new_event_loop()
+                logger.debug("Created persistent event loop for async tools")
+
+            loop = self.__class__._persistent_loop
+
+            # Double-check the loop is not running (safety check)
+            if loop.is_running():
+                logger.debug("Persistent loop is unexpectedly running, using thread execution")
+
+                result_container = []
+                exception_container = []
+
+                # Try to capture Streamlit context from current thread for propagation
+                streamlit_ctx = None
+                try:
+                    from streamlit.runtime.scriptrunner import get_script_run_ctx, add_script_run_ctx
+                    streamlit_ctx = get_script_run_ctx()
+                    if streamlit_ctx:
+                        logger.debug("Captured Streamlit context for propagation to worker thread")
+                except (ImportError, Exception) as e:
+                    logger.debug(f"Streamlit context not available or failed to capture: {e}")
+
+                def run_in_thread():
+                    """Run coroutine in a new thread with its own event loop."""
+                    new_loop = asyncio.new_event_loop()
+                    asyncio.set_event_loop(new_loop)
+                    try:
+                        result = new_loop.run_until_complete(coro)
+                        result_container.append(result)
+                    except Exception as ex:
+                        logger.debug(f"Exception in async thread: {ex}")
+                        exception_container.append(ex)
+                    finally:
+                        new_loop.close()
+                        asyncio.set_event_loop(None)
+
+                thread = threading.Thread(target=run_in_thread, daemon=False)
+
+                # Propagate Streamlit context to the worker thread if available
+                if streamlit_ctx is not None:
+                    try:
+                        add_script_run_ctx(thread, streamlit_ctx)
+                        logger.debug("Successfully propagated Streamlit context to worker thread")
+                    except Exception as e:
+                        logger.warning(f"Failed to propagate Streamlit context to worker thread: {e}")
+
+                thread.start()
+                thread.join(timeout=self.tool_execution_timeout)
+
+                if thread.is_alive():
+                    logger.error("Async operation timed out after 15 minutes")
+                    raise TimeoutError("Async operation in thread timed out")
+
+                if exception_container:
+                    raise exception_container[0]
+
+                return result_container[0] if result_container else None
+            else:
+                # Loop exists but not running - safe to use run_until_complete
+                logger.debug(f"Using persistent loop (id: {id(loop)}) with run_until_complete")
+                asyncio.set_event_loop(loop)
+                return loop.run_until_complete(coro)
+
+    async def _arun(self, *args, **kwargs):
+        # Legacy async support
+        return self.invoke(kwargs, **kwargs)
+
+    async def __perform_tool_calling(self, completion, messages, llm_client, config):
+        # Handle iterative tool-calling and execution
+        logger.info(f"__perform_tool_calling called with {len(completion.tool_calls) if hasattr(completion, 'tool_calls') else 0} tool calls")
+        new_messages = messages + [completion]
+        iteration = 0
+
+        # Continue executing tools until no more tool calls or max iterations reached
+        current_completion = completion
+        while (hasattr(current_completion, 'tool_calls') and
+               current_completion.tool_calls and
+               iteration < self.steps_limit):
+
+            iteration += 1
+            logger.info(f"Tool execution iteration {iteration}/{self.steps_limit}")
+
+            # Execute each tool call in the current completion
+            tool_calls = current_completion.tool_calls if hasattr(current_completion.tool_calls,
+                                                                  '__iter__') else []
+
+            for tool_call in tool_calls:
+                tool_name = tool_call.get('name', '') if isinstance(tool_call, dict) else getattr(tool_call,
+                                                                                                  'name',
+                                                                                                  '')
+                tool_args = tool_call.get('args', {}) if isinstance(tool_call, dict) else getattr(tool_call,
+                                                                                                  'args',
+                                                                                                  {})
+                tool_call_id = tool_call.get('id', '') if isinstance(tool_call, dict) else getattr(
+                    tool_call, 'id', '')
+
+                # Find the tool in filtered tools
+                filtered_tools = self.get_filtered_tools()
+                tool_to_execute = None
+                for tool in filtered_tools:
+                    if tool.name == tool_name:
+                        tool_to_execute = tool
+                        break
+
+                if tool_to_execute:
+                    try:
+                        logger.info(f"Executing tool '{tool_name}' with args: {tool_args}")
+
+                        # Try async invoke first (for MCP tools), fallback to sync
+                        tool_result = None
+                        if hasattr(tool_to_execute, 'ainvoke'):
+                            try:
+                                tool_result = await tool_to_execute.ainvoke(tool_args, config=config)
+                            except (NotImplementedError, AttributeError):
+                                logger.debug(f"Tool '{tool_name}' ainvoke failed, falling back to sync invoke")
+                                tool_result = tool_to_execute.invoke(tool_args, config=config)
+                        else:
+                            # Sync-only tool
+                            tool_result = tool_to_execute.invoke(tool_args, config=config)
+
+                        # Create tool message with result - preserve structured content
+                        from langchain_core.messages import ToolMessage
+
+                        # Check if tool_result is structured content (list of dicts)
+                        # TODO: need solid check for being compatible with ToolMessage content format
+                        if isinstance(tool_result, list) and all(
+                            isinstance(item, dict) and 'type' in item for item in tool_result
+                        ):
+                            # Use structured content directly for multimodal support
+                            tool_message = ToolMessage(
+                                content=tool_result,
+                                tool_call_id=tool_call_id
+                            )
+                        else:
+                            # Fallback to string conversion for other tool results
+                            tool_message = ToolMessage(
+                                content=str(tool_result),
+                                tool_call_id=tool_call_id
+                            )
+                        new_messages.append(tool_message)
+
+                    except Exception as e:
+                        import traceback
+                        error_details = traceback.format_exc()
+                        # Use debug level to avoid duplicate output when CLI callbacks are active
+                        logger.debug(f"Error executing tool '{tool_name}': {e}\n{error_details}")
+                        # Create error tool message
+                        from langchain_core.messages import ToolMessage
+                        tool_message = ToolMessage(
+                            content=f"Error executing {tool_name}: {str(e)}",
+                            tool_call_id=tool_call_id
+                        )
+                        new_messages.append(tool_message)
+                else:
+                    logger.warning(f"Tool '{tool_name}' not found in available tools")
+                    # Create error tool message for missing tool
+                    from langchain_core.messages import ToolMessage
+                    tool_message = ToolMessage(
+                        content=f"Tool '{tool_name}' not available",
+                        tool_call_id=tool_call_id
+                    )
+                    new_messages.append(tool_message)
+
+            # Call LLM again with tool results to get next response
+            try:
+                current_completion = llm_client.invoke(new_messages, config=config)
+                new_messages.append(current_completion)
+
+                # Check if we still have tool calls
+                if hasattr(current_completion, 'tool_calls') and current_completion.tool_calls:
+                    logger.info(f"LLM requested {len(current_completion.tool_calls)} more tool calls")
+                else:
+                    logger.info("LLM completed without requesting more tools")
+                    break
+
+            except Exception as e:
+                error_str = str(e).lower()
+
+                # Check for context window / token limit errors
+                is_context_error = any(indicator in error_str for indicator in [
+                    'context window', 'context_window', 'token limit', 'too long',
+                    'maximum context length', 'input is too long', 'exceeds the limit',
+                    'contextwindowexceedederror', 'max_tokens', 'content too large'
+                ])
+
+                # Check for Bedrock/Claude output limit errors
+                # These often manifest as "model identifier is invalid" when output exceeds limits
+                is_output_limit_error = any(indicator in error_str for indicator in [
+                    'model identifier is invalid',
+                    'bedrockexception',
+                    'output token',
+                    'response too large',
+                    'max_tokens_to_sample',
+                    'output_token_limit'
+                ])
+
+                if is_context_error or is_output_limit_error:
+                    error_type = "output limit" if is_output_limit_error else "context window"
+                    logger.warning(f"{error_type.title()} exceeded during tool execution iteration {iteration}")
+
+                    # Find the last tool message and its associated tool name
+                    last_tool_msg_idx = None
+                    last_tool_name = None
+                    last_tool_call_id = None
+
+                    # First, find the last tool message
+                    for i in range(len(new_messages) - 1, -1, -1):
+                        msg = new_messages[i]
+                        if hasattr(msg, 'tool_call_id') or (hasattr(msg, 'type') and getattr(msg, 'type', None) == 'tool'):
+                            last_tool_msg_idx = i
+                            last_tool_call_id = getattr(msg, 'tool_call_id', None)
+                            break
+
+                    # Find the tool name from the AIMessage that requested this tool call
+                    if last_tool_call_id:
+                        for i in range(last_tool_msg_idx - 1, -1, -1):
+                            msg = new_messages[i]
+                            if hasattr(msg, 'tool_calls') and msg.tool_calls:
+                                for tc in msg.tool_calls:
+                                    tc_id = tc.get('id', '') if isinstance(tc, dict) else getattr(tc, 'id', '')
+                                    if tc_id == last_tool_call_id:
+                                        last_tool_name = tc.get('name', '') if isinstance(tc, dict) else getattr(tc, 'name', '')
+                                        break
+                                if last_tool_name:
+                                    break
+
+                    # Build dynamic suggestion based on the tool that caused the overflow
+                    tool_suggestions = self._get_tool_truncation_suggestions(last_tool_name)
+
+                    # Truncate the problematic tool result if found
+                    if last_tool_msg_idx is not None:
+                        from langchain_core.messages import ToolMessage
+                        original_msg = new_messages[last_tool_msg_idx]
+                        tool_call_id = getattr(original_msg, 'tool_call_id', 'unknown')
+
+                        # Build error-specific guidance
+                        if is_output_limit_error:
+                            truncated_content = (
+                                f"⚠️ MODEL OUTPUT LIMIT EXCEEDED\n\n"
+                                f"The tool '{last_tool_name or 'unknown'}' returned data, but the model's response was too large.\n\n"
+                                f"IMPORTANT: You must provide a SMALLER, more focused response.\n"
+                                f"- Break down your response into smaller chunks\n"
+                                f"- Summarize instead of listing everything\n"
+                                f"- Focus on the most relevant information first\n"
+                                f"- If listing items, show only top 5-10 most important\n\n"
+                                f"Tool-specific tips:\n{tool_suggestions}\n\n"
+                                f"Please retry with a more concise response."
+                            )
+                        else:
+                            truncated_content = (
+                                f"⚠️ TOOL OUTPUT TRUNCATED - Context window exceeded\n\n"
+                                f"The tool '{last_tool_name or 'unknown'}' returned too much data for the model's context window.\n\n"
+                                f"To fix this:\n{tool_suggestions}\n\n"
+                                f"Please retry with more restrictive parameters."
+                            )
+
+                        truncated_msg = ToolMessage(
+                            content=truncated_content,
+                            tool_call_id=tool_call_id
+                        )
+                        new_messages[last_tool_msg_idx] = truncated_msg
+
+                        logger.info(f"Truncated large tool result from '{last_tool_name}' and continuing")
+                        # Continue to next iteration - the model will see the truncation message
+                        continue
+                    else:
+                        # Couldn't find tool message, add error and break
+                        if is_output_limit_error:
+                            error_msg = (
+                                "Model output limit exceeded. Please provide a more concise response. "
+                                "Break down your answer into smaller parts and summarize where possible."
+                            )
+                        else:
+                            error_msg = (
+                                "Context window exceeded. The conversation or tool results are too large. "
+                                "Try using tools with smaller output limits (e.g., max_items, max_depth parameters)."
+                            )
+                        new_messages.append(AIMessage(content=error_msg))
+                        break
+                else:
+                    logger.error(f"Error in LLM call during iteration {iteration}: {e}")
+                    # Add error message and break the loop
+                    error_msg = f"Error processing tool results in iteration {iteration}: {str(e)}"
+                    new_messages.append(AIMessage(content=error_msg))
+                    break
+
+        # Handle max iterations
+        if iteration >= self.steps_limit:
+            logger.warning(f"Reached maximum iterations ({self.steps_limit}) for tool execution")
+
+            # CRITICAL: Check if the last message is an AIMessage with pending tool_calls
+            # that were not processed. If so, we need to add placeholder ToolMessages to prevent
+            # the "assistant message with 'tool_calls' must be followed by tool messages" error
+            # when the conversation continues.
+            if new_messages:
+                last_msg = new_messages[-1]
+                if hasattr(last_msg, 'tool_calls') and last_msg.tool_calls:
+                    from langchain_core.messages import ToolMessage
+                    pending_tool_calls = last_msg.tool_calls if hasattr(last_msg.tool_calls, '__iter__') else []
+
+                    # Check which tool_call_ids already have responses
+                    existing_tool_call_ids = set()
+                    for msg in new_messages:
+                        if hasattr(msg, 'tool_call_id'):
+                            existing_tool_call_ids.add(msg.tool_call_id)
+
+                    # Add placeholder responses for any tool calls without responses
+                    for tool_call in pending_tool_calls:
+                        tool_call_id = tool_call.get('id', '') if isinstance(tool_call, dict) else getattr(tool_call, 'id', '')
+                        tool_name = tool_call.get('name', '') if isinstance(tool_call, dict) else getattr(tool_call, 'name', '')
+
+                        if tool_call_id and tool_call_id not in existing_tool_call_ids:
+                            logger.info(f"Adding placeholder ToolMessage for interrupted tool call: {tool_name} ({tool_call_id})")
+                            placeholder_msg = ToolMessage(
+                                content=f"[Tool execution interrupted - step limit ({self.steps_limit}) reached before {tool_name} could be executed]",
+                                tool_call_id=tool_call_id
+                            )
+                            new_messages.append(placeholder_msg)
+
+            # Add warning message - CLI or calling code can detect this and prompt user
+            warning_msg = f"Maximum tool execution iterations ({self.steps_limit}) reached. Stopping tool execution."
+            new_messages.append(AIMessage(content=warning_msg))
+        else:
+            logger.info(f"Tool execution completed after {iteration} iterations")
+
+        return new_messages, current_completion
+
+    def __get_struct_output_model(self, llm_client, pydantic_model):
+        return llm_client.with_structured_output(pydantic_model)