hud-python 0.4.1__py3-none-any.whl → 0.4.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of hud-python has been flagged as potentially problematic; see the registry's advisory page for details.
- hud/__init__.py +22 -22
- hud/agents/__init__.py +13 -15
- hud/agents/base.py +599 -599
- hud/agents/claude.py +373 -373
- hud/agents/langchain.py +261 -250
- hud/agents/misc/__init__.py +7 -7
- hud/agents/misc/response_agent.py +82 -80
- hud/agents/openai.py +352 -352
- hud/agents/openai_chat_generic.py +154 -154
- hud/agents/tests/__init__.py +1 -1
- hud/agents/tests/test_base.py +742 -742
- hud/agents/tests/test_claude.py +324 -324
- hud/agents/tests/test_client.py +363 -363
- hud/agents/tests/test_openai.py +237 -237
- hud/cli/__init__.py +617 -617
- hud/cli/__main__.py +8 -8
- hud/cli/analyze.py +371 -371
- hud/cli/analyze_metadata.py +230 -230
- hud/cli/build.py +498 -427
- hud/cli/clone.py +185 -185
- hud/cli/cursor.py +92 -92
- hud/cli/debug.py +392 -392
- hud/cli/docker_utils.py +83 -83
- hud/cli/init.py +280 -281
- hud/cli/interactive.py +353 -353
- hud/cli/mcp_server.py +764 -756
- hud/cli/pull.py +330 -336
- hud/cli/push.py +404 -370
- hud/cli/remote_runner.py +311 -311
- hud/cli/runner.py +160 -160
- hud/cli/tests/__init__.py +3 -3
- hud/cli/tests/test_analyze.py +284 -284
- hud/cli/tests/test_cli_init.py +265 -265
- hud/cli/tests/test_cli_main.py +27 -27
- hud/cli/tests/test_clone.py +142 -142
- hud/cli/tests/test_cursor.py +253 -253
- hud/cli/tests/test_debug.py +453 -453
- hud/cli/tests/test_mcp_server.py +139 -139
- hud/cli/tests/test_utils.py +388 -388
- hud/cli/utils.py +263 -263
- hud/clients/README.md +143 -143
- hud/clients/__init__.py +16 -16
- hud/clients/base.py +378 -379
- hud/clients/fastmcp.py +222 -222
- hud/clients/mcp_use.py +298 -278
- hud/clients/tests/__init__.py +1 -1
- hud/clients/tests/test_client_integration.py +111 -111
- hud/clients/tests/test_fastmcp.py +342 -342
- hud/clients/tests/test_protocol.py +188 -188
- hud/clients/utils/__init__.py +1 -1
- hud/clients/utils/retry_transport.py +160 -160
- hud/datasets.py +327 -322
- hud/misc/__init__.py +1 -1
- hud/misc/claude_plays_pokemon.py +292 -292
- hud/otel/__init__.py +35 -35
- hud/otel/collector.py +142 -142
- hud/otel/config.py +164 -164
- hud/otel/context.py +536 -536
- hud/otel/exporters.py +366 -366
- hud/otel/instrumentation.py +97 -97
- hud/otel/processors.py +118 -118
- hud/otel/tests/__init__.py +1 -1
- hud/otel/tests/test_processors.py +197 -197
- hud/server/__init__.py +5 -5
- hud/server/context.py +114 -114
- hud/server/helper/__init__.py +5 -5
- hud/server/low_level.py +132 -132
- hud/server/server.py +170 -166
- hud/server/tests/__init__.py +3 -3
- hud/settings.py +73 -73
- hud/shared/__init__.py +5 -5
- hud/shared/exceptions.py +180 -180
- hud/shared/requests.py +264 -264
- hud/shared/tests/test_exceptions.py +157 -157
- hud/shared/tests/test_requests.py +275 -275
- hud/telemetry/__init__.py +25 -25
- hud/telemetry/instrument.py +379 -379
- hud/telemetry/job.py +309 -309
- hud/telemetry/replay.py +74 -74
- hud/telemetry/trace.py +83 -83
- hud/tools/__init__.py +33 -33
- hud/tools/base.py +365 -365
- hud/tools/bash.py +161 -161
- hud/tools/computer/__init__.py +15 -15
- hud/tools/computer/anthropic.py +437 -437
- hud/tools/computer/hud.py +376 -376
- hud/tools/computer/openai.py +295 -295
- hud/tools/computer/settings.py +82 -82
- hud/tools/edit.py +314 -314
- hud/tools/executors/__init__.py +30 -30
- hud/tools/executors/base.py +539 -539
- hud/tools/executors/pyautogui.py +621 -621
- hud/tools/executors/tests/__init__.py +1 -1
- hud/tools/executors/tests/test_base_executor.py +338 -338
- hud/tools/executors/tests/test_pyautogui_executor.py +165 -165
- hud/tools/executors/xdo.py +511 -511
- hud/tools/playwright.py +412 -412
- hud/tools/tests/__init__.py +3 -3
- hud/tools/tests/test_base.py +282 -282
- hud/tools/tests/test_bash.py +158 -158
- hud/tools/tests/test_bash_extended.py +197 -197
- hud/tools/tests/test_computer.py +425 -425
- hud/tools/tests/test_computer_actions.py +34 -34
- hud/tools/tests/test_edit.py +259 -259
- hud/tools/tests/test_init.py +27 -27
- hud/tools/tests/test_playwright_tool.py +183 -183
- hud/tools/tests/test_tools.py +145 -145
- hud/tools/tests/test_utils.py +156 -156
- hud/tools/types.py +72 -72
- hud/tools/utils.py +50 -50
- hud/types.py +136 -136
- hud/utils/__init__.py +10 -10
- hud/utils/async_utils.py +65 -65
- hud/utils/design.py +236 -168
- hud/utils/mcp.py +55 -55
- hud/utils/progress.py +149 -149
- hud/utils/telemetry.py +66 -66
- hud/utils/tests/test_async_utils.py +173 -173
- hud/utils/tests/test_init.py +17 -17
- hud/utils/tests/test_progress.py +261 -261
- hud/utils/tests/test_telemetry.py +82 -82
- hud/utils/tests/test_version.py +8 -8
- hud/version.py +7 -7
- {hud_python-0.4.1.dist-info → hud_python-0.4.3.dist-info}/METADATA +10 -8
- hud_python-0.4.3.dist-info/RECORD +131 -0
- {hud_python-0.4.1.dist-info → hud_python-0.4.3.dist-info}/licenses/LICENSE +21 -21
- hud/agents/art.py +0 -101
- hud_python-0.4.1.dist-info/RECORD +0 -132
- {hud_python-0.4.1.dist-info → hud_python-0.4.3.dist-info}/WHEEL +0 -0
- {hud_python-0.4.1.dist-info → hud_python-0.4.3.dist-info}/entry_points.txt +0 -0
hud/agents/langchain.py
CHANGED
|
@@ -1,250 +1,261 @@
|
|
|
1
|
-
"""LangChain MCP Agent implementation."""
|
|
2
|
-
|
|
3
|
-
from __future__ import annotations
|
|
4
|
-
|
|
5
|
-
import logging
|
|
6
|
-
from typing import TYPE_CHECKING, Any, ClassVar
|
|
7
|
-
|
|
8
|
-
import mcp.types as types
|
|
9
|
-
from langchain.agents import AgentExecutor, create_tool_calling_agent
|
|
10
|
-
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
|
|
11
|
-
from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
from
|
|
18
|
-
from
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
"""
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
# Create prompt
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
#
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
"
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
1
|
+
"""LangChain MCP Agent implementation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
from typing import TYPE_CHECKING, Any, ClassVar
|
|
7
|
+
|
|
8
|
+
import mcp.types as types
|
|
9
|
+
from langchain.agents import AgentExecutor, create_tool_calling_agent
|
|
10
|
+
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
|
|
11
|
+
from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage
|
|
12
|
+
|
|
13
|
+
import hud
|
|
14
|
+
|
|
15
|
+
if TYPE_CHECKING:
|
|
16
|
+
from langchain.schema.language_model import BaseLanguageModel
|
|
17
|
+
from langchain_core.tools import BaseTool
|
|
18
|
+
from mcp_use.adapters.langchain_adapter import LangChainAdapter
|
|
19
|
+
|
|
20
|
+
try:
|
|
21
|
+
from mcp_use.adapters.langchain_adapter import LangChainAdapter
|
|
22
|
+
except ImportError:
|
|
23
|
+
LangChainAdapter = None # type: ignore[misc, assignment]
|
|
24
|
+
|
|
25
|
+
from hud.types import AgentResponse, MCPToolCall, MCPToolResult
|
|
26
|
+
|
|
27
|
+
from .base import MCPAgent
|
|
28
|
+
|
|
29
|
+
logger = logging.getLogger(__name__)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class LangChainAgent(MCPAgent):
|
|
33
|
+
"""
|
|
34
|
+
LangChain agent that uses MCP servers for tool execution.
|
|
35
|
+
|
|
36
|
+
This agent wraps any LangChain-compatible LLM and provides
|
|
37
|
+
access to MCP tools through LangChain's tool-calling interface.
|
|
38
|
+
"""
|
|
39
|
+
|
|
40
|
+
metadata: ClassVar[dict[str, Any]] = {
|
|
41
|
+
"display_width": 1920,
|
|
42
|
+
"display_height": 1080,
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
def __init__(
|
|
46
|
+
self,
|
|
47
|
+
llm: BaseLanguageModel,
|
|
48
|
+
**kwargs: Any,
|
|
49
|
+
) -> None:
|
|
50
|
+
"""
|
|
51
|
+
Initialize LangChain MCP agent.
|
|
52
|
+
|
|
53
|
+
Args:
|
|
54
|
+
llm: Any LangChain-compatible language model
|
|
55
|
+
**kwargs: Additional arguments passed to BaseMCPAgent
|
|
56
|
+
"""
|
|
57
|
+
super().__init__(**kwargs)
|
|
58
|
+
|
|
59
|
+
if LangChainAdapter is None:
|
|
60
|
+
raise ImportError(
|
|
61
|
+
"LangChainAdapter is not available. "
|
|
62
|
+
"Please install the optional agent dependencies: pip install 'hud-python[agent]'"
|
|
63
|
+
)
|
|
64
|
+
|
|
65
|
+
self.llm = llm
|
|
66
|
+
self.adapter = LangChainAdapter(disallowed_tools=self.disallowed_tools)
|
|
67
|
+
self._langchain_tools: list[BaseTool] | None = None
|
|
68
|
+
|
|
69
|
+
self.model_name = (
|
|
70
|
+
"langchain-" + self.llm.model_name # type: ignore
|
|
71
|
+
if hasattr(self.llm, "model_name")
|
|
72
|
+
else "unknown"
|
|
73
|
+
)
|
|
74
|
+
|
|
75
|
+
def _get_langchain_tools(self) -> list[BaseTool]:
|
|
76
|
+
"""Get or create LangChain tools from MCP tools."""
|
|
77
|
+
if self._langchain_tools is not None:
|
|
78
|
+
return self._langchain_tools
|
|
79
|
+
|
|
80
|
+
# Create LangChain tools from MCP tools using the adapter
|
|
81
|
+
self._langchain_tools = []
|
|
82
|
+
|
|
83
|
+
# Convert available tools using the adapter; no server grouping
|
|
84
|
+
langchain_tools = self.adapter._convert_tools(self._available_tools, "default") # type: ignore[reportAttributeAccessIssue]
|
|
85
|
+
self._langchain_tools.extend(langchain_tools)
|
|
86
|
+
|
|
87
|
+
logger.info("Created %s LangChain tools from MCP tools", len(self._langchain_tools))
|
|
88
|
+
return self._langchain_tools
|
|
89
|
+
|
|
90
|
+
async def get_system_messages(self) -> list[BaseMessage]:
|
|
91
|
+
"""Get system messages for LangChain."""
|
|
92
|
+
return [SystemMessage(content=self.system_prompt)]
|
|
93
|
+
|
|
94
|
+
async def format_blocks(self, blocks: list[types.ContentBlock]) -> list[BaseMessage]:
|
|
95
|
+
"""Create initial messages for LangChain."""
|
|
96
|
+
messages = []
|
|
97
|
+
for block in blocks:
|
|
98
|
+
if isinstance(block, types.TextContent):
|
|
99
|
+
messages.append(HumanMessage(content=block.text))
|
|
100
|
+
elif isinstance(block, types.ImageContent):
|
|
101
|
+
messages.append(HumanMessage(content=block.data))
|
|
102
|
+
return messages
|
|
103
|
+
|
|
104
|
+
@hud.instrument(
|
|
105
|
+
span_type="agent",
|
|
106
|
+
record_args=False, # Messages can be large
|
|
107
|
+
record_result=True,
|
|
108
|
+
)
|
|
109
|
+
async def get_response(self, messages: list[BaseMessage]) -> AgentResponse:
|
|
110
|
+
"""Get response from LangChain model including any tool calls."""
|
|
111
|
+
# Get LangChain tools (created lazily)
|
|
112
|
+
langchain_tools = self._get_langchain_tools()
|
|
113
|
+
|
|
114
|
+
# Create a prompt template from current messages
|
|
115
|
+
# Extract system message if present
|
|
116
|
+
system_content = "You are a helpful assistant"
|
|
117
|
+
non_system_messages = []
|
|
118
|
+
|
|
119
|
+
for msg in messages:
|
|
120
|
+
if isinstance(msg, SystemMessage):
|
|
121
|
+
system_content = str(msg.content)
|
|
122
|
+
else:
|
|
123
|
+
non_system_messages.append(msg)
|
|
124
|
+
|
|
125
|
+
# Create prompt with placeholders
|
|
126
|
+
prompt = ChatPromptTemplate.from_messages(
|
|
127
|
+
[
|
|
128
|
+
("system", system_content),
|
|
129
|
+
MessagesPlaceholder(variable_name="chat_history"),
|
|
130
|
+
MessagesPlaceholder(variable_name="agent_scratchpad"),
|
|
131
|
+
]
|
|
132
|
+
)
|
|
133
|
+
|
|
134
|
+
# Create agent with tools
|
|
135
|
+
agent = create_tool_calling_agent(
|
|
136
|
+
llm=self.llm,
|
|
137
|
+
tools=langchain_tools,
|
|
138
|
+
prompt=prompt,
|
|
139
|
+
)
|
|
140
|
+
|
|
141
|
+
# Create executor
|
|
142
|
+
executor = AgentExecutor(
|
|
143
|
+
agent=agent,
|
|
144
|
+
tools=langchain_tools,
|
|
145
|
+
verbose=False,
|
|
146
|
+
)
|
|
147
|
+
|
|
148
|
+
# Format the last user message as input
|
|
149
|
+
last_user_msg = None
|
|
150
|
+
for msg in reversed(non_system_messages):
|
|
151
|
+
if isinstance(msg, HumanMessage):
|
|
152
|
+
last_user_msg = msg
|
|
153
|
+
break
|
|
154
|
+
|
|
155
|
+
if not last_user_msg:
|
|
156
|
+
return AgentResponse(content="No user message found", tool_calls=[], done=True)
|
|
157
|
+
|
|
158
|
+
# Extract text from message content
|
|
159
|
+
input_text = ""
|
|
160
|
+
if isinstance(last_user_msg.content, str):
|
|
161
|
+
input_text = last_user_msg.content
|
|
162
|
+
elif isinstance(last_user_msg.content, list):
|
|
163
|
+
# Extract text from multimodal content
|
|
164
|
+
for item in last_user_msg.content:
|
|
165
|
+
if isinstance(item, dict) and item.get("type") == "text":
|
|
166
|
+
input_text = item.get("text", "")
|
|
167
|
+
break
|
|
168
|
+
|
|
169
|
+
# Build chat history (exclude last user message and system)
|
|
170
|
+
chat_history = []
|
|
171
|
+
for _, msg in enumerate(non_system_messages[:-1]):
|
|
172
|
+
if isinstance(msg, HumanMessage | AIMessage):
|
|
173
|
+
chat_history.append(msg)
|
|
174
|
+
|
|
175
|
+
# Execute the agent
|
|
176
|
+
try:
|
|
177
|
+
result = await executor.ainvoke(
|
|
178
|
+
{
|
|
179
|
+
"input": input_text,
|
|
180
|
+
"chat_history": chat_history,
|
|
181
|
+
}
|
|
182
|
+
)
|
|
183
|
+
|
|
184
|
+
# Process the result
|
|
185
|
+
output = result.get("output", "")
|
|
186
|
+
|
|
187
|
+
# Check if tools were called
|
|
188
|
+
if result.get("intermediate_steps"):
|
|
189
|
+
# Tools were called
|
|
190
|
+
tool_calls = []
|
|
191
|
+
for action, _ in result["intermediate_steps"]:
|
|
192
|
+
if hasattr(action, "tool") and hasattr(action, "tool_input"):
|
|
193
|
+
tool_calls.append(
|
|
194
|
+
MCPToolCall(
|
|
195
|
+
name=action.tool,
|
|
196
|
+
arguments=action.tool_input,
|
|
197
|
+
)
|
|
198
|
+
)
|
|
199
|
+
|
|
200
|
+
return AgentResponse(content=output, tool_calls=tool_calls, done=False)
|
|
201
|
+
else:
|
|
202
|
+
# No tools called, just text response
|
|
203
|
+
return AgentResponse(content=output, tool_calls=[], done=True)
|
|
204
|
+
|
|
205
|
+
except Exception as e:
|
|
206
|
+
logger.error("Agent execution failed: %s", e)
|
|
207
|
+
return AgentResponse(content=f"Error: {e!s}", tool_calls=[], done=True)
|
|
208
|
+
|
|
209
|
+
async def format_tool_results(
|
|
210
|
+
self, tool_calls: list[MCPToolCall], tool_results: list[MCPToolResult]
|
|
211
|
+
) -> list[BaseMessage]:
|
|
212
|
+
"""Format tool results into LangChain messages."""
|
|
213
|
+
# Create an AI message with the tool calls and results
|
|
214
|
+
messages = []
|
|
215
|
+
|
|
216
|
+
# First add an AI message indicating tools were called
|
|
217
|
+
tool_names = [tc.name for tc in tool_calls]
|
|
218
|
+
ai_content = f"I'll use the following tools: {', '.join(tool_names)}"
|
|
219
|
+
messages.append(AIMessage(content=ai_content))
|
|
220
|
+
|
|
221
|
+
# Build result text from tool results
|
|
222
|
+
text_parts = []
|
|
223
|
+
latest_screenshot = None
|
|
224
|
+
|
|
225
|
+
for tool_call, result in zip(tool_calls, tool_results, strict=False):
|
|
226
|
+
if result.isError:
|
|
227
|
+
error_text = "Tool execution failed"
|
|
228
|
+
for content in result.content:
|
|
229
|
+
if isinstance(content, types.TextContent):
|
|
230
|
+
error_text = content.text
|
|
231
|
+
break
|
|
232
|
+
text_parts.append(f"Error - {tool_call.name}: {error_text}")
|
|
233
|
+
else:
|
|
234
|
+
# Process success content
|
|
235
|
+
tool_output = []
|
|
236
|
+
for content in result.content:
|
|
237
|
+
if isinstance(content, types.TextContent):
|
|
238
|
+
tool_output.append(content.text)
|
|
239
|
+
elif isinstance(content, types.ImageContent):
|
|
240
|
+
latest_screenshot = content.data
|
|
241
|
+
|
|
242
|
+
if tool_output:
|
|
243
|
+
text_parts.append(f"{tool_call.name}: " + " ".join(tool_output))
|
|
244
|
+
|
|
245
|
+
result_text = "\n".join(text_parts) if text_parts else "No output from tools"
|
|
246
|
+
|
|
247
|
+
# Then add a human message with the tool results
|
|
248
|
+
if latest_screenshot:
|
|
249
|
+
# Include screenshot in multimodal format
|
|
250
|
+
content = [
|
|
251
|
+
{"type": "text", "text": f"Tool results:\n{result_text}"},
|
|
252
|
+
{
|
|
253
|
+
"type": "image_url",
|
|
254
|
+
"image_url": {"url": f"data:image/png;base64,{latest_screenshot}"},
|
|
255
|
+
},
|
|
256
|
+
]
|
|
257
|
+
messages.append(HumanMessage(content=content))
|
|
258
|
+
else:
|
|
259
|
+
messages.append(HumanMessage(content=f"Tool results:\n{result_text}"))
|
|
260
|
+
|
|
261
|
+
return messages
|
hud/agents/misc/__init__.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
|
-
"""Miscellaneous agents."""
|
|
2
|
-
|
|
3
|
-
from __future__ import annotations
|
|
4
|
-
|
|
5
|
-
from .response_agent import ResponseAgent
|
|
6
|
-
|
|
7
|
-
__all__ = ["ResponseAgent"]
|
|
1
|
+
"""Miscellaneous agents."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from .response_agent import ResponseAgent
|
|
6
|
+
|
|
7
|
+
__all__ = ["ResponseAgent"]
|