openhands-sdk 1.7.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (180)
  1. openhands/sdk/__init__.py +111 -0
  2. openhands/sdk/agent/__init__.py +8 -0
  3. openhands/sdk/agent/agent.py +650 -0
  4. openhands/sdk/agent/base.py +457 -0
  5. openhands/sdk/agent/prompts/in_context_learning_example.j2 +169 -0
  6. openhands/sdk/agent/prompts/in_context_learning_example_suffix.j2 +3 -0
  7. openhands/sdk/agent/prompts/model_specific/anthropic_claude.j2 +3 -0
  8. openhands/sdk/agent/prompts/model_specific/google_gemini.j2 +1 -0
  9. openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5-codex.j2 +2 -0
  10. openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5.j2 +3 -0
  11. openhands/sdk/agent/prompts/security_policy.j2 +22 -0
  12. openhands/sdk/agent/prompts/security_risk_assessment.j2 +21 -0
  13. openhands/sdk/agent/prompts/self_documentation.j2 +15 -0
  14. openhands/sdk/agent/prompts/system_prompt.j2 +132 -0
  15. openhands/sdk/agent/prompts/system_prompt_interactive.j2 +14 -0
  16. openhands/sdk/agent/prompts/system_prompt_long_horizon.j2 +40 -0
  17. openhands/sdk/agent/prompts/system_prompt_planning.j2 +40 -0
  18. openhands/sdk/agent/prompts/system_prompt_tech_philosophy.j2 +122 -0
  19. openhands/sdk/agent/utils.py +228 -0
  20. openhands/sdk/context/__init__.py +28 -0
  21. openhands/sdk/context/agent_context.py +264 -0
  22. openhands/sdk/context/condenser/__init__.py +18 -0
  23. openhands/sdk/context/condenser/base.py +100 -0
  24. openhands/sdk/context/condenser/llm_summarizing_condenser.py +248 -0
  25. openhands/sdk/context/condenser/no_op_condenser.py +14 -0
  26. openhands/sdk/context/condenser/pipeline_condenser.py +56 -0
  27. openhands/sdk/context/condenser/prompts/summarizing_prompt.j2 +59 -0
  28. openhands/sdk/context/condenser/utils.py +149 -0
  29. openhands/sdk/context/prompts/__init__.py +6 -0
  30. openhands/sdk/context/prompts/prompt.py +114 -0
  31. openhands/sdk/context/prompts/templates/ask_agent_template.j2 +11 -0
  32. openhands/sdk/context/prompts/templates/skill_knowledge_info.j2 +8 -0
  33. openhands/sdk/context/prompts/templates/system_message_suffix.j2 +32 -0
  34. openhands/sdk/context/skills/__init__.py +28 -0
  35. openhands/sdk/context/skills/exceptions.py +11 -0
  36. openhands/sdk/context/skills/skill.py +720 -0
  37. openhands/sdk/context/skills/trigger.py +36 -0
  38. openhands/sdk/context/skills/types.py +48 -0
  39. openhands/sdk/context/view.py +503 -0
  40. openhands/sdk/conversation/__init__.py +40 -0
  41. openhands/sdk/conversation/base.py +281 -0
  42. openhands/sdk/conversation/conversation.py +152 -0
  43. openhands/sdk/conversation/conversation_stats.py +85 -0
  44. openhands/sdk/conversation/event_store.py +157 -0
  45. openhands/sdk/conversation/events_list_base.py +17 -0
  46. openhands/sdk/conversation/exceptions.py +50 -0
  47. openhands/sdk/conversation/fifo_lock.py +133 -0
  48. openhands/sdk/conversation/impl/__init__.py +5 -0
  49. openhands/sdk/conversation/impl/local_conversation.py +665 -0
  50. openhands/sdk/conversation/impl/remote_conversation.py +956 -0
  51. openhands/sdk/conversation/persistence_const.py +9 -0
  52. openhands/sdk/conversation/response_utils.py +41 -0
  53. openhands/sdk/conversation/secret_registry.py +126 -0
  54. openhands/sdk/conversation/serialization_diff.py +0 -0
  55. openhands/sdk/conversation/state.py +392 -0
  56. openhands/sdk/conversation/stuck_detector.py +311 -0
  57. openhands/sdk/conversation/title_utils.py +191 -0
  58. openhands/sdk/conversation/types.py +45 -0
  59. openhands/sdk/conversation/visualizer/__init__.py +12 -0
  60. openhands/sdk/conversation/visualizer/base.py +67 -0
  61. openhands/sdk/conversation/visualizer/default.py +373 -0
  62. openhands/sdk/critic/__init__.py +15 -0
  63. openhands/sdk/critic/base.py +38 -0
  64. openhands/sdk/critic/impl/__init__.py +12 -0
  65. openhands/sdk/critic/impl/agent_finished.py +83 -0
  66. openhands/sdk/critic/impl/empty_patch.py +49 -0
  67. openhands/sdk/critic/impl/pass_critic.py +42 -0
  68. openhands/sdk/event/__init__.py +42 -0
  69. openhands/sdk/event/base.py +149 -0
  70. openhands/sdk/event/condenser.py +82 -0
  71. openhands/sdk/event/conversation_error.py +25 -0
  72. openhands/sdk/event/conversation_state.py +104 -0
  73. openhands/sdk/event/llm_completion_log.py +39 -0
  74. openhands/sdk/event/llm_convertible/__init__.py +20 -0
  75. openhands/sdk/event/llm_convertible/action.py +139 -0
  76. openhands/sdk/event/llm_convertible/message.py +142 -0
  77. openhands/sdk/event/llm_convertible/observation.py +141 -0
  78. openhands/sdk/event/llm_convertible/system.py +61 -0
  79. openhands/sdk/event/token.py +16 -0
  80. openhands/sdk/event/types.py +11 -0
  81. openhands/sdk/event/user_action.py +21 -0
  82. openhands/sdk/git/exceptions.py +43 -0
  83. openhands/sdk/git/git_changes.py +249 -0
  84. openhands/sdk/git/git_diff.py +129 -0
  85. openhands/sdk/git/models.py +21 -0
  86. openhands/sdk/git/utils.py +189 -0
  87. openhands/sdk/hooks/__init__.py +30 -0
  88. openhands/sdk/hooks/config.py +180 -0
  89. openhands/sdk/hooks/conversation_hooks.py +227 -0
  90. openhands/sdk/hooks/executor.py +155 -0
  91. openhands/sdk/hooks/manager.py +170 -0
  92. openhands/sdk/hooks/types.py +40 -0
  93. openhands/sdk/io/__init__.py +6 -0
  94. openhands/sdk/io/base.py +48 -0
  95. openhands/sdk/io/cache.py +85 -0
  96. openhands/sdk/io/local.py +119 -0
  97. openhands/sdk/io/memory.py +54 -0
  98. openhands/sdk/llm/__init__.py +45 -0
  99. openhands/sdk/llm/exceptions/__init__.py +45 -0
  100. openhands/sdk/llm/exceptions/classifier.py +50 -0
  101. openhands/sdk/llm/exceptions/mapping.py +54 -0
  102. openhands/sdk/llm/exceptions/types.py +101 -0
  103. openhands/sdk/llm/llm.py +1140 -0
  104. openhands/sdk/llm/llm_registry.py +122 -0
  105. openhands/sdk/llm/llm_response.py +59 -0
  106. openhands/sdk/llm/message.py +656 -0
  107. openhands/sdk/llm/mixins/fn_call_converter.py +1288 -0
  108. openhands/sdk/llm/mixins/non_native_fc.py +97 -0
  109. openhands/sdk/llm/options/__init__.py +1 -0
  110. openhands/sdk/llm/options/chat_options.py +93 -0
  111. openhands/sdk/llm/options/common.py +19 -0
  112. openhands/sdk/llm/options/responses_options.py +67 -0
  113. openhands/sdk/llm/router/__init__.py +10 -0
  114. openhands/sdk/llm/router/base.py +117 -0
  115. openhands/sdk/llm/router/impl/multimodal.py +76 -0
  116. openhands/sdk/llm/router/impl/random.py +22 -0
  117. openhands/sdk/llm/streaming.py +9 -0
  118. openhands/sdk/llm/utils/metrics.py +312 -0
  119. openhands/sdk/llm/utils/model_features.py +192 -0
  120. openhands/sdk/llm/utils/model_info.py +90 -0
  121. openhands/sdk/llm/utils/model_prompt_spec.py +98 -0
  122. openhands/sdk/llm/utils/retry_mixin.py +128 -0
  123. openhands/sdk/llm/utils/telemetry.py +362 -0
  124. openhands/sdk/llm/utils/unverified_models.py +156 -0
  125. openhands/sdk/llm/utils/verified_models.py +65 -0
  126. openhands/sdk/logger/__init__.py +22 -0
  127. openhands/sdk/logger/logger.py +195 -0
  128. openhands/sdk/logger/rolling.py +113 -0
  129. openhands/sdk/mcp/__init__.py +24 -0
  130. openhands/sdk/mcp/client.py +76 -0
  131. openhands/sdk/mcp/definition.py +106 -0
  132. openhands/sdk/mcp/exceptions.py +19 -0
  133. openhands/sdk/mcp/tool.py +270 -0
  134. openhands/sdk/mcp/utils.py +83 -0
  135. openhands/sdk/observability/__init__.py +4 -0
  136. openhands/sdk/observability/laminar.py +166 -0
  137. openhands/sdk/observability/utils.py +20 -0
  138. openhands/sdk/py.typed +0 -0
  139. openhands/sdk/secret/__init__.py +19 -0
  140. openhands/sdk/secret/secrets.py +92 -0
  141. openhands/sdk/security/__init__.py +6 -0
  142. openhands/sdk/security/analyzer.py +111 -0
  143. openhands/sdk/security/confirmation_policy.py +61 -0
  144. openhands/sdk/security/llm_analyzer.py +29 -0
  145. openhands/sdk/security/risk.py +100 -0
  146. openhands/sdk/tool/__init__.py +34 -0
  147. openhands/sdk/tool/builtins/__init__.py +34 -0
  148. openhands/sdk/tool/builtins/finish.py +106 -0
  149. openhands/sdk/tool/builtins/think.py +117 -0
  150. openhands/sdk/tool/registry.py +184 -0
  151. openhands/sdk/tool/schema.py +286 -0
  152. openhands/sdk/tool/spec.py +39 -0
  153. openhands/sdk/tool/tool.py +481 -0
  154. openhands/sdk/utils/__init__.py +22 -0
  155. openhands/sdk/utils/async_executor.py +115 -0
  156. openhands/sdk/utils/async_utils.py +39 -0
  157. openhands/sdk/utils/cipher.py +68 -0
  158. openhands/sdk/utils/command.py +90 -0
  159. openhands/sdk/utils/deprecation.py +166 -0
  160. openhands/sdk/utils/github.py +44 -0
  161. openhands/sdk/utils/json.py +48 -0
  162. openhands/sdk/utils/models.py +570 -0
  163. openhands/sdk/utils/paging.py +63 -0
  164. openhands/sdk/utils/pydantic_diff.py +85 -0
  165. openhands/sdk/utils/pydantic_secrets.py +64 -0
  166. openhands/sdk/utils/truncate.py +117 -0
  167. openhands/sdk/utils/visualize.py +58 -0
  168. openhands/sdk/workspace/__init__.py +17 -0
  169. openhands/sdk/workspace/base.py +158 -0
  170. openhands/sdk/workspace/local.py +189 -0
  171. openhands/sdk/workspace/models.py +35 -0
  172. openhands/sdk/workspace/remote/__init__.py +8 -0
  173. openhands/sdk/workspace/remote/async_remote_workspace.py +149 -0
  174. openhands/sdk/workspace/remote/base.py +164 -0
  175. openhands/sdk/workspace/remote/remote_workspace_mixin.py +323 -0
  176. openhands/sdk/workspace/workspace.py +49 -0
  177. openhands_sdk-1.7.3.dist-info/METADATA +17 -0
  178. openhands_sdk-1.7.3.dist-info/RECORD +180 -0
  179. openhands_sdk-1.7.3.dist-info/WHEEL +5 -0
  180. openhands_sdk-1.7.3.dist-info/top_level.txt +1 -0
@@ -0,0 +1,650 @@
1
+ import json
2
+
3
+ from pydantic import ValidationError, model_validator
4
+
5
+ import openhands.sdk.security.analyzer as analyzer
6
+ import openhands.sdk.security.risk as risk
7
+ from openhands.sdk.agent.base import AgentBase
8
+ from openhands.sdk.agent.utils import (
9
+ fix_malformed_tool_arguments,
10
+ make_llm_completion,
11
+ prepare_llm_messages,
12
+ )
13
+ from openhands.sdk.conversation import (
14
+ ConversationCallbackType,
15
+ ConversationState,
16
+ ConversationTokenCallbackType,
17
+ LocalConversation,
18
+ )
19
+ from openhands.sdk.conversation.state import ConversationExecutionStatus
20
+ from openhands.sdk.event import (
21
+ ActionEvent,
22
+ AgentErrorEvent,
23
+ LLMConvertibleEvent,
24
+ MessageEvent,
25
+ ObservationEvent,
26
+ SystemPromptEvent,
27
+ TokenEvent,
28
+ UserRejectObservation,
29
+ )
30
+ from openhands.sdk.event.condenser import Condensation, CondensationRequest
31
+ from openhands.sdk.llm import (
32
+ LLMResponse,
33
+ Message,
34
+ MessageToolCall,
35
+ ReasoningItemModel,
36
+ RedactedThinkingBlock,
37
+ TextContent,
38
+ ThinkingBlock,
39
+ )
40
+ from openhands.sdk.llm.exceptions import (
41
+ FunctionCallValidationError,
42
+ LLMContextWindowExceedError,
43
+ )
44
+ from openhands.sdk.logger import get_logger
45
+ from openhands.sdk.observability.laminar import (
46
+ maybe_init_laminar,
47
+ observe,
48
+ should_enable_observability,
49
+ )
50
+ from openhands.sdk.observability.utils import extract_action_name
51
+ from openhands.sdk.security.llm_analyzer import LLMSecurityAnalyzer
52
+ from openhands.sdk.tool import (
53
+ Action,
54
+ Observation,
55
+ )
56
+ from openhands.sdk.tool.builtins import (
57
+ FinishAction,
58
+ FinishTool,
59
+ ThinkAction,
60
+ )
61
+
62
+
63
+ logger = get_logger(__name__)
64
+ maybe_init_laminar()
65
+
66
+
67
class Agent(AgentBase):
    """Main agent implementation for OpenHands.

    The Agent class provides the core functionality for running AI agents that can
    interact with tools, process messages, and execute actions. It inherits from
    AgentBase and implements the agent execution logic.

    Example:
        >>> from openhands.sdk import LLM, Agent, Tool
        >>> llm = LLM(model="claude-sonnet-4-20250514", api_key=SecretStr("key"))
        >>> tools = [Tool(name="TerminalTool"), Tool(name="FileEditorTool")]
        >>> agent = Agent(llm=llm, tools=tools)
    """

    @model_validator(mode="before")
    @classmethod
    def _add_security_prompt_as_default(cls, data):
        """Ensure llm_security_analyzer=True is always set before initialization."""
        # Non-dict payloads (e.g. an already-constructed model instance) are
        # passed through untouched; pydantic handles those downstream.
        if not isinstance(data, dict):
            return data

        # Normalize system_prompt_kwargs: `or {}` covers both a missing key and
        # an explicit None value.
        kwargs = data.get("system_prompt_kwargs") or {}
        if not isinstance(kwargs, dict):
            # Anything non-dict is discarded rather than merged.
            kwargs = {}

        # Default the flag on without clobbering an explicitly provided value.
        kwargs.setdefault("llm_security_analyzer", True)
        data["system_prompt_kwargs"] = kwargs
        return data
95
+
96
+ def init_state(
97
+ self,
98
+ state: ConversationState,
99
+ on_event: ConversationCallbackType,
100
+ ) -> None:
101
+ super().init_state(state, on_event=on_event)
102
+ # TODO(openhands): we should add test to test this init_state will actually
103
+ # modify state in-place
104
+
105
+ llm_convertible_messages = [
106
+ event for event in state.events if isinstance(event, LLMConvertibleEvent)
107
+ ]
108
+ if len(llm_convertible_messages) == 0:
109
+ # Prepare system message
110
+ event = SystemPromptEvent(
111
+ source="agent",
112
+ system_prompt=TextContent(text=self.system_message),
113
+ # Tools are stored as ToolDefinition objects and converted to
114
+ # OpenAI format with security_risk parameter during LLM completion.
115
+ # See make_llm_completion() in agent/utils.py for details.
116
+ tools=list(self.tools_map.values()),
117
+ )
118
+ on_event(event)
119
+
120
+ def _execute_actions(
121
+ self,
122
+ conversation: LocalConversation,
123
+ action_events: list[ActionEvent],
124
+ on_event: ConversationCallbackType,
125
+ ):
126
+ for action_event in action_events:
127
+ self._execute_action_event(conversation, action_event, on_event=on_event)
128
+
129
    @observe(name="agent.step", ignore_inputs=["state", "on_event"])
    def step(
        self,
        conversation: LocalConversation,
        on_event: ConversationCallbackType,
        on_token: ConversationTokenCallbackType | None = None,
    ) -> None:
        """Run one agent step: execute pending actions or sample the next action.

        A step either (a) executes already-confirmed pending actions, (b) emits
        a condensation event, or (c) calls the LLM and dispatches its tool calls
        / message. Mutates `conversation.state` in place and reports every event
        through `on_event`.
        """
        state = conversation.state
        # Check for pending actions (implicit confirmation)
        # and execute them before sampling new actions.
        pending_actions = ConversationState.get_unmatched_actions(state.events)
        if pending_actions:
            logger.info(
                "Confirmation mode: Executing %d pending action(s)",
                len(pending_actions),
            )
            self._execute_actions(conversation, pending_actions, on_event)
            return

        # Check if the last user message was blocked by a UserPromptSubmit hook
        # If so, skip processing and mark conversation as finished
        for event in reversed(list(state.events)):
            if isinstance(event, MessageEvent) and event.source == "user":
                reason = state.pop_blocked_message(event.id)
                if reason is not None:
                    logger.info(f"User message blocked by hook: {reason}")
                    state.execution_status = ConversationExecutionStatus.FINISHED
                    return
                break  # Only check the most recent user message

        # Prepare LLM messages using the utility function
        _messages_or_condensation = prepare_llm_messages(
            state.events, condenser=self.condenser, llm=self.llm
        )

        # Process condensation event before agent samples another action
        if isinstance(_messages_or_condensation, Condensation):
            on_event(_messages_or_condensation)
            return

        _messages = _messages_or_condensation

        # Skip the system message ([1:]) when dumping the payload for debugging.
        logger.debug(
            "Sending messages to LLM: "
            f"{json.dumps([m.model_dump() for m in _messages[1:]], indent=2)}"
        )

        try:
            llm_response = make_llm_completion(
                self.llm,
                _messages,
                tools=list(self.tools_map.values()),
                on_token=on_token,
            )
        except FunctionCallValidationError as e:
            # Feed the validation error back as a user message so the model can
            # retry with a well-formed call on the next step.
            logger.warning(f"LLM generated malformed function call: {e}")
            error_message = MessageEvent(
                source="user",
                llm_message=Message(
                    role="user",
                    content=[TextContent(text=str(e))],
                ),
            )
            on_event(error_message)
            return
        except LLMContextWindowExceedError as e:
            # If condenser is available and handles requests, trigger condensation
            if (
                self.condenser is not None
                and self.condenser.handles_condensation_requests()
            ):
                logger.warning(
                    "LLM raised context window exceeded error, triggering condensation"
                )
                on_event(CondensationRequest())
                return
            # No condenser available or doesn't handle requests; log helpful warning
            self._log_context_window_exceeded_warning()
            raise e

        # LLMResponse already contains the converted message and metrics snapshot
        message: Message = llm_response.message

        # Check if this is a reasoning-only response (e.g., from reasoning models)
        # or a message-only response without tool calls
        has_reasoning = (
            message.responses_reasoning_item is not None
            or message.reasoning_content is not None
            or (message.thinking_blocks and len(message.thinking_blocks) > 0)
        )
        has_content = any(
            isinstance(c, TextContent) and c.text.strip() for c in message.content
        )

        if message.tool_calls and len(message.tool_calls) > 0:
            if not all(isinstance(c, TextContent) for c in message.content):
                logger.warning(
                    "LLM returned tool calls but message content is not all "
                    "TextContent - ignoring non-text content"
                )

            # Generate unique batch ID for this LLM response
            thought_content = [c for c in message.content if isinstance(c, TextContent)]

            action_events: list[ActionEvent] = []
            for i, tool_call in enumerate(message.tool_calls):
                action_event = self._get_action_event(
                    tool_call,
                    llm_response_id=llm_response.id,
                    on_event=on_event,
                    security_analyzer=state.security_analyzer,
                    thought=thought_content
                    if i == 0
                    else [],  # Only first gets thought
                    # Only first gets reasoning content
                    reasoning_content=message.reasoning_content if i == 0 else None,
                    # Only first gets thinking blocks
                    thinking_blocks=list(message.thinking_blocks) if i == 0 else [],
                    responses_reasoning_item=message.responses_reasoning_item
                    if i == 0
                    else None,
                )
                # None means the call was invalid; error events already emitted.
                if action_event is None:
                    continue
                action_events.append(action_event)

            # Handle confirmation mode - exit early if actions need confirmation
            if self._requires_user_confirmation(state, action_events):
                return

            if action_events:
                self._execute_actions(conversation, action_events, on_event)

            # Emit VLLM token ids if enabled before returning
            self._maybe_emit_vllm_tokens(llm_response, on_event)
            return

        # No tool calls - emit message event for reasoning or content responses
        if not has_reasoning and not has_content:
            logger.warning("LLM produced empty response - continuing agent loop")

        msg_event = MessageEvent(
            source="agent",
            llm_message=message,
            llm_response_id=llm_response.id,
        )
        on_event(msg_event)

        # Emit VLLM token ids if enabled
        self._maybe_emit_vllm_tokens(llm_response, on_event)

        # Finish conversation if LLM produced content (awaits user input)
        # Continue if only reasoning without content (e.g., GPT-5 codex thinking)
        if has_content:
            logger.debug("LLM produced a message response - awaits user input")
            state.execution_status = ConversationExecutionStatus.FINISHED
        return
286
+
287
+ def _requires_user_confirmation(
288
+ self, state: ConversationState, action_events: list[ActionEvent]
289
+ ) -> bool:
290
+ """
291
+ Decide whether user confirmation is needed to proceed.
292
+
293
+ Rules:
294
+ 1. Confirmation mode is enabled
295
+ 2. Every action requires confirmation
296
+ 3. A single `FinishAction` never requires confirmation
297
+ 4. A single `ThinkAction` never requires confirmation
298
+ """
299
+ # A single `FinishAction` or `ThinkAction` never requires confirmation
300
+ if len(action_events) == 1 and isinstance(
301
+ action_events[0].action, (FinishAction, ThinkAction)
302
+ ):
303
+ return False
304
+
305
+ # If there are no actions there is nothing to confirm
306
+ if len(action_events) == 0:
307
+ return False
308
+
309
+ # If a security analyzer is registered, use it to grab the risks of the actions
310
+ # involved. If not, we'll set the risks to UNKNOWN.
311
+ if state.security_analyzer is not None:
312
+ risks = [
313
+ risk
314
+ for _, risk in state.security_analyzer.analyze_pending_actions(
315
+ action_events
316
+ )
317
+ ]
318
+ else:
319
+ risks = [risk.SecurityRisk.UNKNOWN] * len(action_events)
320
+
321
+ # Grab the confirmation policy from the state and pass in the risks.
322
+ if any(state.confirmation_policy.should_confirm(risk) for risk in risks):
323
+ state.execution_status = (
324
+ ConversationExecutionStatus.WAITING_FOR_CONFIRMATION
325
+ )
326
+ return True
327
+
328
+ return False
329
+
330
+ def _extract_security_risk(
331
+ self,
332
+ arguments: dict,
333
+ tool_name: str,
334
+ read_only_tool: bool,
335
+ security_analyzer: analyzer.SecurityAnalyzerBase | None = None,
336
+ ) -> risk.SecurityRisk:
337
+ requires_sr = isinstance(security_analyzer, LLMSecurityAnalyzer)
338
+ raw = arguments.pop("security_risk", None)
339
+
340
+ # Default risk value for action event
341
+ # Tool is marked as read-only so security risk can be ignored
342
+ if read_only_tool:
343
+ return risk.SecurityRisk.UNKNOWN
344
+
345
+ # Raises exception if failed to pass risk field when expected
346
+ # Exception will be sent back to agent as error event
347
+ # Strong models like GPT-5 can correct itself by retrying
348
+ if requires_sr and raw is None:
349
+ raise ValueError(
350
+ f"Failed to provide security_risk field in tool '{tool_name}'"
351
+ )
352
+
353
+ # When using weaker models without security analyzer
354
+ # safely ignore missing security risk fields
355
+ if not requires_sr and raw is None:
356
+ return risk.SecurityRisk.UNKNOWN
357
+
358
+ # Raises exception if invalid risk enum passed by LLM
359
+ security_risk = risk.SecurityRisk(raw)
360
+ return security_risk
361
+
362
+ def _get_action_event(
363
+ self,
364
+ tool_call: MessageToolCall,
365
+ llm_response_id: str,
366
+ on_event: ConversationCallbackType,
367
+ security_analyzer: analyzer.SecurityAnalyzerBase | None = None,
368
+ thought: list[TextContent] | None = None,
369
+ reasoning_content: str | None = None,
370
+ thinking_blocks: list[ThinkingBlock | RedactedThinkingBlock] | None = None,
371
+ responses_reasoning_item: ReasoningItemModel | None = None,
372
+ ) -> ActionEvent | None:
373
+ """Converts a tool call into an ActionEvent, validating arguments.
374
+
375
+ NOTE: state will be mutated in-place.
376
+ """
377
+ tool_name = tool_call.name
378
+ tool = self.tools_map.get(tool_name, None)
379
+ # Handle non-existing tools
380
+ if tool is None:
381
+ available = list(self.tools_map.keys())
382
+ err = f"Tool '{tool_name}' not found. Available: {available}"
383
+ logger.error(err)
384
+ # Persist assistant function_call so next turn has matching call_id
385
+ tc_event = ActionEvent(
386
+ source="agent",
387
+ thought=thought or [],
388
+ reasoning_content=reasoning_content,
389
+ thinking_blocks=thinking_blocks or [],
390
+ responses_reasoning_item=responses_reasoning_item,
391
+ tool_call=tool_call,
392
+ tool_name=tool_call.name,
393
+ tool_call_id=tool_call.id,
394
+ llm_response_id=llm_response_id,
395
+ action=None,
396
+ )
397
+ on_event(tc_event)
398
+ event = AgentErrorEvent(
399
+ error=err,
400
+ tool_name=tool_name,
401
+ tool_call_id=tool_call.id,
402
+ )
403
+ on_event(event)
404
+ return
405
+
406
+ # Validate arguments
407
+ security_risk: risk.SecurityRisk = risk.SecurityRisk.UNKNOWN
408
+ try:
409
+ arguments = json.loads(tool_call.arguments)
410
+
411
+ # Fix malformed arguments (e.g., JSON strings for list/dict fields)
412
+ arguments = fix_malformed_tool_arguments(arguments, tool.action_type)
413
+ security_risk = self._extract_security_risk(
414
+ arguments,
415
+ tool.name,
416
+ tool.annotations.readOnlyHint if tool.annotations else False,
417
+ security_analyzer,
418
+ )
419
+ assert "security_risk" not in arguments, (
420
+ "Unexpected 'security_risk' key found in tool arguments"
421
+ )
422
+
423
+ action: Action = tool.action_from_arguments(arguments)
424
+ except (json.JSONDecodeError, ValidationError, ValueError) as e:
425
+ err = (
426
+ f"Error validating args {tool_call.arguments} for tool "
427
+ f"'{tool.name}': {e}"
428
+ )
429
+ # Persist assistant function_call so next turn has matching call_id
430
+ tc_event = ActionEvent(
431
+ source="agent",
432
+ thought=thought or [],
433
+ reasoning_content=reasoning_content,
434
+ thinking_blocks=thinking_blocks or [],
435
+ responses_reasoning_item=responses_reasoning_item,
436
+ tool_call=tool_call,
437
+ tool_name=tool_call.name,
438
+ tool_call_id=tool_call.id,
439
+ llm_response_id=llm_response_id,
440
+ action=None,
441
+ )
442
+ on_event(tc_event)
443
+ event = AgentErrorEvent(
444
+ error=err,
445
+ tool_name=tool_name,
446
+ tool_call_id=tool_call.id,
447
+ )
448
+ on_event(event)
449
+ return
450
+
451
+ action_event = ActionEvent(
452
+ action=action,
453
+ thought=thought or [],
454
+ reasoning_content=reasoning_content,
455
+ thinking_blocks=thinking_blocks or [],
456
+ responses_reasoning_item=responses_reasoning_item,
457
+ tool_name=tool.name,
458
+ tool_call_id=tool_call.id,
459
+ tool_call=tool_call,
460
+ llm_response_id=llm_response_id,
461
+ security_risk=security_risk,
462
+ )
463
+ on_event(action_event)
464
+ return action_event
465
+
466
+ @observe(ignore_inputs=["state", "on_event"])
467
+ def _execute_action_event(
468
+ self,
469
+ conversation: LocalConversation,
470
+ action_event: ActionEvent,
471
+ on_event: ConversationCallbackType,
472
+ ):
473
+ """Execute an action event and update the conversation state.
474
+
475
+ It will call the tool's executor and update the state & call callback fn
476
+ with the observation.
477
+
478
+ If the action was blocked by a PreToolUse hook (recorded in
479
+ state.blocked_actions), a UserRejectObservation is emitted instead
480
+ of executing the action.
481
+ """
482
+ state = conversation.state
483
+
484
+ # Check if this action was blocked by a PreToolUse hook
485
+ reason = state.pop_blocked_action(action_event.id)
486
+ if reason is not None:
487
+ logger.info(f"Action '{action_event.tool_name}' blocked by hook: {reason}")
488
+ rejection = UserRejectObservation(
489
+ action_id=action_event.id,
490
+ tool_name=action_event.tool_name,
491
+ tool_call_id=action_event.tool_call_id,
492
+ rejection_reason=reason,
493
+ )
494
+ on_event(rejection)
495
+ return rejection
496
+
497
+ tool = self.tools_map.get(action_event.tool_name, None)
498
+ if tool is None:
499
+ raise RuntimeError(
500
+ f"Tool '{action_event.tool_name}' not found. This should not happen "
501
+ "as it was checked earlier."
502
+ )
503
+
504
+ # Execute actions!
505
+ try:
506
+ if should_enable_observability():
507
+ tool_name = extract_action_name(action_event)
508
+ observation: Observation = observe(name=tool_name, span_type="TOOL")(
509
+ tool
510
+ )(action_event.action, conversation)
511
+ else:
512
+ observation = tool(action_event.action, conversation)
513
+ assert isinstance(observation, Observation), (
514
+ f"Tool '{tool.name}' executor must return an Observation"
515
+ )
516
+ except ValueError as e:
517
+ # Tool execution raised a ValueError (e.g., invalid argument combination)
518
+ # Convert to AgentErrorEvent so the agent can correct itself
519
+ err = f"Error executing tool '{tool.name}': {e}"
520
+ logger.warning(err)
521
+ error_event = AgentErrorEvent(
522
+ error=err,
523
+ tool_name=tool.name,
524
+ tool_call_id=action_event.tool_call.id,
525
+ )
526
+ on_event(error_event)
527
+ return error_event
528
+
529
+ obs_event = ObservationEvent(
530
+ observation=observation,
531
+ action_id=action_event.id,
532
+ tool_name=tool.name,
533
+ tool_call_id=action_event.tool_call.id,
534
+ )
535
+ on_event(obs_event)
536
+
537
+ # Set conversation state
538
+ if tool.name == FinishTool.name:
539
+ state.execution_status = ConversationExecutionStatus.FINISHED
540
+ return obs_event
541
+
542
+ def _maybe_emit_vllm_tokens(
543
+ self, llm_response: LLMResponse, on_event: ConversationCallbackType
544
+ ) -> None:
545
+ if (
546
+ "return_token_ids" in self.llm.litellm_extra_body
547
+ ) and self.llm.litellm_extra_body["return_token_ids"]:
548
+ token_event = TokenEvent(
549
+ source="agent",
550
+ prompt_token_ids=llm_response.raw_response["prompt_token_ids"],
551
+ response_token_ids=llm_response.raw_response["choices"][0][
552
+ "provider_specific_fields"
553
+ ]["token_ids"],
554
+ )
555
+ on_event(token_event)
556
+
557
+ def _log_context_window_exceeded_warning(self) -> None:
558
+ """Log a helpful warning when context window is exceeded without a condenser."""
559
+ if self.condenser is None:
560
+ logger.warning(
561
+ "\n"
562
+ "=" * 80 + "\n"
563
+ "⚠️ CONTEXT WINDOW EXCEEDED ERROR\n"
564
+ "=" * 80 + "\n"
565
+ "\n"
566
+ "The LLM's context window has been exceeded, but no condenser is "
567
+ "configured.\n"
568
+ "\n"
569
+ "Current configuration:\n"
570
+ f" • Condenser: None\n"
571
+ f" • LLM Model: {self.llm.model}\n"
572
+ "\n"
573
+ "To prevent this error, configure a condenser to automatically "
574
+ "summarize\n"
575
+ "conversation history when it gets too long.\n"
576
+ "\n"
577
+ "Example configuration:\n"
578
+ "\n"
579
+ " from openhands.sdk import Agent, LLM\n"
580
+ " from openhands.sdk.context.condenser import "
581
+ "LLMSummarizingCondenser\n"
582
+ "\n"
583
+ " agent = Agent(\n"
584
+ " llm=LLM(model='your-model'),\n"
585
+ " condenser=LLMSummarizingCondenser(\n"
586
+ " llm=LLM(model='your-model'), # Can use same or "
587
+ "cheaper model\n"
588
+ " max_size=120, # Maximum events before condensation\n"
589
+ " keep_first=4 # Number of initial events to preserve\n"
590
+ " )\n"
591
+ " )\n"
592
+ "\n"
593
+ "For more information, see: "
594
+ "https://docs.openhands.dev/sdk/guides/context-condenser\n"
595
+ "=" * 80
596
+ )
597
+ else:
598
+ condenser_type = type(self.condenser).__name__
599
+ handles_requests = self.condenser.handles_condensation_requests()
600
+ condenser_config = self.condenser.model_dump(
601
+ exclude={"llm"}, exclude_none=True
602
+ )
603
+ condenser_llm_obj = getattr(self.condenser, "llm", None)
604
+ condenser_llm = (
605
+ condenser_llm_obj.model if condenser_llm_obj is not None else "N/A"
606
+ )
607
+
608
+ logger.warning(
609
+ "\n"
610
+ "=" * 80 + "\n"
611
+ "⚠️ CONTEXT WINDOW EXCEEDED ERROR\n"
612
+ "=" * 80 + "\n"
613
+ "\n"
614
+ "The LLM's context window has been exceeded.\n"
615
+ "\n"
616
+ "Current configuration:\n"
617
+ f" • Condenser Type: {condenser_type}\n"
618
+ f" • Handles Condensation Requests: {handles_requests}\n"
619
+ f" • Condenser LLM: {condenser_llm}\n"
620
+ f" • Agent LLM Model: {self.llm.model}\n"
621
+ f" • Condenser Config: {json.dumps(condenser_config, indent=4)}\n"
622
+ "\n"
623
+ "Your condenser is configured but does not handle condensation "
624
+ "requests\n"
625
+ "(handles_condensation_requests() returned False).\n"
626
+ "\n"
627
+ "To fix this:\n"
628
+ " 1. Use LLMSummarizingCondenser which handles condensation "
629
+ "requests, OR\n"
630
+ " 2. Implement handles_condensation_requests() in your custom "
631
+ "condenser\n"
632
+ "\n"
633
+ "Example with LLMSummarizingCondenser:\n"
634
+ "\n"
635
+ " from openhands.sdk.context.condenser import "
636
+ "LLMSummarizingCondenser\n"
637
+ "\n"
638
+ " agent = Agent(\n"
639
+ " llm=LLM(model='your-model'),\n"
640
+ " condenser=LLMSummarizingCondenser(\n"
641
+ " llm=LLM(model='your-model'),\n"
642
+ " max_size=120,\n"
643
+ " keep_first=4\n"
644
+ " )\n"
645
+ " )\n"
646
+ "\n"
647
+ "For more information, see: "
648
+ "https://docs.openhands.dev/sdk/guides/context-condenser\n"
649
+ "=" * 80
650
+ )