openhands-sdk 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (172)
  1. openhands/sdk/__init__.py +111 -0
  2. openhands/sdk/agent/__init__.py +8 -0
  3. openhands/sdk/agent/agent.py +607 -0
  4. openhands/sdk/agent/base.py +454 -0
  5. openhands/sdk/agent/prompts/in_context_learning_example.j2 +169 -0
  6. openhands/sdk/agent/prompts/in_context_learning_example_suffix.j2 +3 -0
  7. openhands/sdk/agent/prompts/model_specific/anthropic_claude.j2 +3 -0
  8. openhands/sdk/agent/prompts/model_specific/google_gemini.j2 +1 -0
  9. openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5-codex.j2 +3 -0
  10. openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5.j2 +3 -0
  11. openhands/sdk/agent/prompts/security_policy.j2 +22 -0
  12. openhands/sdk/agent/prompts/security_risk_assessment.j2 +21 -0
  13. openhands/sdk/agent/prompts/self_documentation.j2 +15 -0
  14. openhands/sdk/agent/prompts/system_prompt.j2 +132 -0
  15. openhands/sdk/agent/prompts/system_prompt_interactive.j2 +14 -0
  16. openhands/sdk/agent/prompts/system_prompt_long_horizon.j2 +40 -0
  17. openhands/sdk/agent/prompts/system_prompt_planning.j2 +40 -0
  18. openhands/sdk/agent/prompts/system_prompt_tech_philosophy.j2 +122 -0
  19. openhands/sdk/agent/utils.py +223 -0
  20. openhands/sdk/context/__init__.py +28 -0
  21. openhands/sdk/context/agent_context.py +240 -0
  22. openhands/sdk/context/condenser/__init__.py +18 -0
  23. openhands/sdk/context/condenser/base.py +95 -0
  24. openhands/sdk/context/condenser/llm_summarizing_condenser.py +89 -0
  25. openhands/sdk/context/condenser/no_op_condenser.py +13 -0
  26. openhands/sdk/context/condenser/pipeline_condenser.py +55 -0
  27. openhands/sdk/context/condenser/prompts/summarizing_prompt.j2 +59 -0
  28. openhands/sdk/context/prompts/__init__.py +6 -0
  29. openhands/sdk/context/prompts/prompt.py +114 -0
  30. openhands/sdk/context/prompts/templates/ask_agent_template.j2 +11 -0
  31. openhands/sdk/context/prompts/templates/skill_knowledge_info.j2 +8 -0
  32. openhands/sdk/context/prompts/templates/system_message_suffix.j2 +32 -0
  33. openhands/sdk/context/skills/__init__.py +28 -0
  34. openhands/sdk/context/skills/exceptions.py +11 -0
  35. openhands/sdk/context/skills/skill.py +630 -0
  36. openhands/sdk/context/skills/trigger.py +36 -0
  37. openhands/sdk/context/skills/types.py +48 -0
  38. openhands/sdk/context/view.py +306 -0
  39. openhands/sdk/conversation/__init__.py +40 -0
  40. openhands/sdk/conversation/base.py +281 -0
  41. openhands/sdk/conversation/conversation.py +146 -0
  42. openhands/sdk/conversation/conversation_stats.py +85 -0
  43. openhands/sdk/conversation/event_store.py +157 -0
  44. openhands/sdk/conversation/events_list_base.py +17 -0
  45. openhands/sdk/conversation/exceptions.py +50 -0
  46. openhands/sdk/conversation/fifo_lock.py +133 -0
  47. openhands/sdk/conversation/impl/__init__.py +5 -0
  48. openhands/sdk/conversation/impl/local_conversation.py +620 -0
  49. openhands/sdk/conversation/impl/remote_conversation.py +883 -0
  50. openhands/sdk/conversation/persistence_const.py +9 -0
  51. openhands/sdk/conversation/response_utils.py +41 -0
  52. openhands/sdk/conversation/secret_registry.py +126 -0
  53. openhands/sdk/conversation/serialization_diff.py +0 -0
  54. openhands/sdk/conversation/state.py +352 -0
  55. openhands/sdk/conversation/stuck_detector.py +311 -0
  56. openhands/sdk/conversation/title_utils.py +191 -0
  57. openhands/sdk/conversation/types.py +45 -0
  58. openhands/sdk/conversation/visualizer/__init__.py +12 -0
  59. openhands/sdk/conversation/visualizer/base.py +67 -0
  60. openhands/sdk/conversation/visualizer/default.py +373 -0
  61. openhands/sdk/critic/__init__.py +15 -0
  62. openhands/sdk/critic/base.py +38 -0
  63. openhands/sdk/critic/impl/__init__.py +12 -0
  64. openhands/sdk/critic/impl/agent_finished.py +83 -0
  65. openhands/sdk/critic/impl/empty_patch.py +49 -0
  66. openhands/sdk/critic/impl/pass_critic.py +42 -0
  67. openhands/sdk/event/__init__.py +42 -0
  68. openhands/sdk/event/base.py +149 -0
  69. openhands/sdk/event/condenser.py +82 -0
  70. openhands/sdk/event/conversation_error.py +25 -0
  71. openhands/sdk/event/conversation_state.py +104 -0
  72. openhands/sdk/event/llm_completion_log.py +39 -0
  73. openhands/sdk/event/llm_convertible/__init__.py +20 -0
  74. openhands/sdk/event/llm_convertible/action.py +139 -0
  75. openhands/sdk/event/llm_convertible/message.py +142 -0
  76. openhands/sdk/event/llm_convertible/observation.py +141 -0
  77. openhands/sdk/event/llm_convertible/system.py +61 -0
  78. openhands/sdk/event/token.py +16 -0
  79. openhands/sdk/event/types.py +11 -0
  80. openhands/sdk/event/user_action.py +21 -0
  81. openhands/sdk/git/exceptions.py +43 -0
  82. openhands/sdk/git/git_changes.py +249 -0
  83. openhands/sdk/git/git_diff.py +129 -0
  84. openhands/sdk/git/models.py +21 -0
  85. openhands/sdk/git/utils.py +189 -0
  86. openhands/sdk/io/__init__.py +6 -0
  87. openhands/sdk/io/base.py +48 -0
  88. openhands/sdk/io/local.py +82 -0
  89. openhands/sdk/io/memory.py +54 -0
  90. openhands/sdk/llm/__init__.py +45 -0
  91. openhands/sdk/llm/exceptions/__init__.py +45 -0
  92. openhands/sdk/llm/exceptions/classifier.py +50 -0
  93. openhands/sdk/llm/exceptions/mapping.py +54 -0
  94. openhands/sdk/llm/exceptions/types.py +101 -0
  95. openhands/sdk/llm/llm.py +1140 -0
  96. openhands/sdk/llm/llm_registry.py +122 -0
  97. openhands/sdk/llm/llm_response.py +59 -0
  98. openhands/sdk/llm/message.py +656 -0
  99. openhands/sdk/llm/mixins/fn_call_converter.py +1243 -0
  100. openhands/sdk/llm/mixins/non_native_fc.py +93 -0
  101. openhands/sdk/llm/options/__init__.py +1 -0
  102. openhands/sdk/llm/options/chat_options.py +93 -0
  103. openhands/sdk/llm/options/common.py +19 -0
  104. openhands/sdk/llm/options/responses_options.py +67 -0
  105. openhands/sdk/llm/router/__init__.py +10 -0
  106. openhands/sdk/llm/router/base.py +117 -0
  107. openhands/sdk/llm/router/impl/multimodal.py +76 -0
  108. openhands/sdk/llm/router/impl/random.py +22 -0
  109. openhands/sdk/llm/streaming.py +9 -0
  110. openhands/sdk/llm/utils/metrics.py +312 -0
  111. openhands/sdk/llm/utils/model_features.py +191 -0
  112. openhands/sdk/llm/utils/model_info.py +90 -0
  113. openhands/sdk/llm/utils/model_prompt_spec.py +98 -0
  114. openhands/sdk/llm/utils/retry_mixin.py +128 -0
  115. openhands/sdk/llm/utils/telemetry.py +362 -0
  116. openhands/sdk/llm/utils/unverified_models.py +156 -0
  117. openhands/sdk/llm/utils/verified_models.py +66 -0
  118. openhands/sdk/logger/__init__.py +22 -0
  119. openhands/sdk/logger/logger.py +195 -0
  120. openhands/sdk/logger/rolling.py +113 -0
  121. openhands/sdk/mcp/__init__.py +24 -0
  122. openhands/sdk/mcp/client.py +76 -0
  123. openhands/sdk/mcp/definition.py +106 -0
  124. openhands/sdk/mcp/exceptions.py +19 -0
  125. openhands/sdk/mcp/tool.py +270 -0
  126. openhands/sdk/mcp/utils.py +83 -0
  127. openhands/sdk/observability/__init__.py +4 -0
  128. openhands/sdk/observability/laminar.py +166 -0
  129. openhands/sdk/observability/utils.py +20 -0
  130. openhands/sdk/py.typed +0 -0
  131. openhands/sdk/secret/__init__.py +19 -0
  132. openhands/sdk/secret/secrets.py +92 -0
  133. openhands/sdk/security/__init__.py +6 -0
  134. openhands/sdk/security/analyzer.py +111 -0
  135. openhands/sdk/security/confirmation_policy.py +61 -0
  136. openhands/sdk/security/llm_analyzer.py +29 -0
  137. openhands/sdk/security/risk.py +100 -0
  138. openhands/sdk/tool/__init__.py +34 -0
  139. openhands/sdk/tool/builtins/__init__.py +34 -0
  140. openhands/sdk/tool/builtins/finish.py +106 -0
  141. openhands/sdk/tool/builtins/think.py +117 -0
  142. openhands/sdk/tool/registry.py +161 -0
  143. openhands/sdk/tool/schema.py +276 -0
  144. openhands/sdk/tool/spec.py +39 -0
  145. openhands/sdk/tool/tool.py +481 -0
  146. openhands/sdk/utils/__init__.py +22 -0
  147. openhands/sdk/utils/async_executor.py +115 -0
  148. openhands/sdk/utils/async_utils.py +39 -0
  149. openhands/sdk/utils/cipher.py +68 -0
  150. openhands/sdk/utils/command.py +90 -0
  151. openhands/sdk/utils/deprecation.py +166 -0
  152. openhands/sdk/utils/github.py +44 -0
  153. openhands/sdk/utils/json.py +48 -0
  154. openhands/sdk/utils/models.py +570 -0
  155. openhands/sdk/utils/paging.py +63 -0
  156. openhands/sdk/utils/pydantic_diff.py +85 -0
  157. openhands/sdk/utils/pydantic_secrets.py +64 -0
  158. openhands/sdk/utils/truncate.py +117 -0
  159. openhands/sdk/utils/visualize.py +58 -0
  160. openhands/sdk/workspace/__init__.py +17 -0
  161. openhands/sdk/workspace/base.py +158 -0
  162. openhands/sdk/workspace/local.py +189 -0
  163. openhands/sdk/workspace/models.py +35 -0
  164. openhands/sdk/workspace/remote/__init__.py +8 -0
  165. openhands/sdk/workspace/remote/async_remote_workspace.py +149 -0
  166. openhands/sdk/workspace/remote/base.py +164 -0
  167. openhands/sdk/workspace/remote/remote_workspace_mixin.py +323 -0
  168. openhands/sdk/workspace/workspace.py +49 -0
  169. openhands_sdk-1.7.0.dist-info/METADATA +17 -0
  170. openhands_sdk-1.7.0.dist-info/RECORD +172 -0
  171. openhands_sdk-1.7.0.dist-info/WHEEL +5 -0
  172. openhands_sdk-1.7.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,607 @@
1
+ import json
2
+
3
+ from pydantic import ValidationError, model_validator
4
+
5
+ import openhands.sdk.security.analyzer as analyzer
6
+ import openhands.sdk.security.risk as risk
7
+ from openhands.sdk.agent.base import AgentBase
8
+ from openhands.sdk.agent.utils import (
9
+ fix_malformed_tool_arguments,
10
+ make_llm_completion,
11
+ prepare_llm_messages,
12
+ )
13
+ from openhands.sdk.conversation import (
14
+ ConversationCallbackType,
15
+ ConversationState,
16
+ ConversationTokenCallbackType,
17
+ LocalConversation,
18
+ )
19
+ from openhands.sdk.conversation.state import ConversationExecutionStatus
20
+ from openhands.sdk.event import (
21
+ ActionEvent,
22
+ AgentErrorEvent,
23
+ LLMConvertibleEvent,
24
+ MessageEvent,
25
+ ObservationEvent,
26
+ SystemPromptEvent,
27
+ TokenEvent,
28
+ )
29
+ from openhands.sdk.event.condenser import Condensation, CondensationRequest
30
+ from openhands.sdk.llm import (
31
+ LLMResponse,
32
+ Message,
33
+ MessageToolCall,
34
+ ReasoningItemModel,
35
+ RedactedThinkingBlock,
36
+ TextContent,
37
+ ThinkingBlock,
38
+ )
39
+ from openhands.sdk.llm.exceptions import (
40
+ FunctionCallValidationError,
41
+ LLMContextWindowExceedError,
42
+ )
43
+ from openhands.sdk.logger import get_logger
44
+ from openhands.sdk.observability.laminar import (
45
+ maybe_init_laminar,
46
+ observe,
47
+ should_enable_observability,
48
+ )
49
+ from openhands.sdk.observability.utils import extract_action_name
50
+ from openhands.sdk.security.llm_analyzer import LLMSecurityAnalyzer
51
+ from openhands.sdk.tool import (
52
+ Action,
53
+ Observation,
54
+ )
55
+ from openhands.sdk.tool.builtins import (
56
+ FinishAction,
57
+ FinishTool,
58
+ ThinkAction,
59
+ )
60
+
61
+
62
+ logger = get_logger(__name__)
63
+ maybe_init_laminar()
64
+
65
+
66
class Agent(AgentBase):
    """Main agent implementation for OpenHands.

    The Agent class provides the core functionality for running AI agents that can
    interact with tools, process messages, and execute actions. It inherits from
    AgentBase and implements the agent execution logic.

    Example:
        >>> from pydantic import SecretStr
        >>> from openhands.sdk import LLM, Agent, Tool
        >>> llm = LLM(model="claude-sonnet-4-20250514", api_key=SecretStr("key"))
        >>> tools = [Tool(name="TerminalTool"), Tool(name="FileEditorTool")]
        >>> agent = Agent(llm=llm, tools=tools)
    """
79
+
80
+ @model_validator(mode="before")
81
+ @classmethod
82
+ def _add_security_prompt_as_default(cls, data):
83
+ """Ensure llm_security_analyzer=True is always set before initialization."""
84
+ if not isinstance(data, dict):
85
+ return data
86
+
87
+ kwargs = data.get("system_prompt_kwargs") or {}
88
+ if not isinstance(kwargs, dict):
89
+ kwargs = {}
90
+
91
+ kwargs.setdefault("llm_security_analyzer", True)
92
+ data["system_prompt_kwargs"] = kwargs
93
+ return data
94
+
95
+ def init_state(
96
+ self,
97
+ state: ConversationState,
98
+ on_event: ConversationCallbackType,
99
+ ) -> None:
100
+ super().init_state(state, on_event=on_event)
101
+ # TODO(openhands): we should add test to test this init_state will actually
102
+ # modify state in-place
103
+
104
+ llm_convertible_messages = [
105
+ event for event in state.events if isinstance(event, LLMConvertibleEvent)
106
+ ]
107
+ if len(llm_convertible_messages) == 0:
108
+ # Prepare system message
109
+ event = SystemPromptEvent(
110
+ source="agent",
111
+ system_prompt=TextContent(text=self.system_message),
112
+ # Tools are stored as ToolDefinition objects and converted to
113
+ # OpenAI format with security_risk parameter during LLM completion.
114
+ # See make_llm_completion() in agent/utils.py for details.
115
+ tools=list(self.tools_map.values()),
116
+ )
117
+ on_event(event)
118
+
119
+ def _execute_actions(
120
+ self,
121
+ conversation: LocalConversation,
122
+ action_events: list[ActionEvent],
123
+ on_event: ConversationCallbackType,
124
+ ):
125
+ for action_event in action_events:
126
+ self._execute_action_event(conversation, action_event, on_event=on_event)
127
+
128
    @observe(name="agent.step", ignore_inputs=["state", "on_event"])
    def step(
        self,
        conversation: LocalConversation,
        on_event: ConversationCallbackType,
        on_token: ConversationTokenCallbackType | None = None,
    ) -> None:
        """Run one agent step: execute pending actions or sample a new LLM turn.

        Order of operations:
        1. Execute any pending (unmatched) actions and return early.
        2. Prepare LLM messages; if the condenser yields a Condensation,
           emit it and return so condensation happens before sampling.
        3. Call the LLM. A malformed function call is fed back to the LLM as
           a user message; a context-window overflow triggers condensation
           when the condenser supports it, otherwise the error is re-raised.
        4. For tool calls: build ActionEvents and execute them unless user
           confirmation is required. Otherwise emit a MessageEvent and mark
           the conversation FINISHED when the LLM produced text content.

        Args:
            conversation: The active local conversation (state is mutated).
            on_event: Callback invoked for every event this step emits.
            on_token: Optional streaming-token callback forwarded to the LLM.
        """
        state = conversation.state
        # Check for pending actions (implicit confirmation)
        # and execute them before sampling new actions.
        pending_actions = ConversationState.get_unmatched_actions(state.events)
        if pending_actions:
            logger.info(
                "Confirmation mode: Executing %d pending action(s)",
                len(pending_actions),
            )
            self._execute_actions(conversation, pending_actions, on_event)
            return

        # Prepare LLM messages using the utility function
        _messages_or_condensation = prepare_llm_messages(
            state.events, condenser=self.condenser
        )

        # Process condensation event before agent samples another action
        if isinstance(_messages_or_condensation, Condensation):
            on_event(_messages_or_condensation)
            return

        _messages = _messages_or_condensation

        # Skip the system prompt (_messages[0]) in the debug dump.
        logger.debug(
            "Sending messages to LLM: "
            f"{json.dumps([m.model_dump() for m in _messages[1:]], indent=2)}"
        )

        try:
            llm_response = make_llm_completion(
                self.llm,
                _messages,
                tools=list(self.tools_map.values()),
                on_token=on_token,
            )
        except FunctionCallValidationError as e:
            # Surface the validation error back to the LLM as a user message
            # so a capable model can retry with a corrected call.
            logger.warning(f"LLM generated malformed function call: {e}")
            error_message = MessageEvent(
                source="user",
                llm_message=Message(
                    role="user",
                    content=[TextContent(text=str(e))],
                ),
            )
            on_event(error_message)
            return
        except LLMContextWindowExceedError as e:
            # If condenser is available and handles requests, trigger condensation
            if (
                self.condenser is not None
                and self.condenser.handles_condensation_requests()
            ):
                logger.warning(
                    "LLM raised context window exceeded error, triggering condensation"
                )
                on_event(CondensationRequest())
                return
            # No condenser available or doesn't handle requests; log helpful warning
            self._log_context_window_exceeded_warning()
            raise e

        # LLMResponse already contains the converted message and metrics snapshot
        message: Message = llm_response.message

        # Check if this is a reasoning-only response (e.g., from reasoning models)
        # or a message-only response without tool calls
        has_reasoning = (
            message.responses_reasoning_item is not None
            or message.reasoning_content is not None
            or (message.thinking_blocks and len(message.thinking_blocks) > 0)
        )
        has_content = any(
            isinstance(c, TextContent) and c.text.strip() for c in message.content
        )

        if message.tool_calls and len(message.tool_calls) > 0:
            if not all(isinstance(c, TextContent) for c in message.content):
                logger.warning(
                    "LLM returned tool calls but message content is not all "
                    "TextContent - ignoring non-text content"
                )

            # Collect the text content to attach as "thought" on the first action
            thought_content = [c for c in message.content if isinstance(c, TextContent)]

            action_events: list[ActionEvent] = []
            for i, tool_call in enumerate(message.tool_calls):
                action_event = self._get_action_event(
                    tool_call,
                    llm_response_id=llm_response.id,
                    on_event=on_event,
                    security_analyzer=state.security_analyzer,
                    thought=thought_content
                    if i == 0
                    else [],  # Only first gets thought
                    # Only first gets reasoning content
                    reasoning_content=message.reasoning_content if i == 0 else None,
                    # Only first gets thinking blocks
                    thinking_blocks=list(message.thinking_blocks) if i == 0 else [],
                    responses_reasoning_item=message.responses_reasoning_item
                    if i == 0
                    else None,
                )
                if action_event is None:
                    continue
                action_events.append(action_event)

            # Handle confirmation mode - exit early if actions need confirmation
            if self._requires_user_confirmation(state, action_events):
                return

            if action_events:
                self._execute_actions(conversation, action_events, on_event)

            # Emit VLLM token ids if enabled before returning
            self._maybe_emit_vllm_tokens(llm_response, on_event)
            return

        # No tool calls - emit message event for reasoning or content responses
        if not has_reasoning and not has_content:
            logger.warning("LLM produced empty response - continuing agent loop")

        msg_event = MessageEvent(
            source="agent",
            llm_message=message,
            llm_response_id=llm_response.id,
        )
        on_event(msg_event)

        # Emit VLLM token ids if enabled
        self._maybe_emit_vllm_tokens(llm_response, on_event)

        # Finish conversation if LLM produced content (awaits user input)
        # Continue if only reasoning without content (e.g., GPT-5 codex thinking)
        if has_content:
            logger.debug("LLM produced a message response - awaits user input")
            state.execution_status = ConversationExecutionStatus.FINISHED
            return
274
+
275
+ def _requires_user_confirmation(
276
+ self, state: ConversationState, action_events: list[ActionEvent]
277
+ ) -> bool:
278
+ """
279
+ Decide whether user confirmation is needed to proceed.
280
+
281
+ Rules:
282
+ 1. Confirmation mode is enabled
283
+ 2. Every action requires confirmation
284
+ 3. A single `FinishAction` never requires confirmation
285
+ 4. A single `ThinkAction` never requires confirmation
286
+ """
287
+ # A single `FinishAction` or `ThinkAction` never requires confirmation
288
+ if len(action_events) == 1 and isinstance(
289
+ action_events[0].action, (FinishAction, ThinkAction)
290
+ ):
291
+ return False
292
+
293
+ # If there are no actions there is nothing to confirm
294
+ if len(action_events) == 0:
295
+ return False
296
+
297
+ # If a security analyzer is registered, use it to grab the risks of the actions
298
+ # involved. If not, we'll set the risks to UNKNOWN.
299
+ if state.security_analyzer is not None:
300
+ risks = [
301
+ risk
302
+ for _, risk in state.security_analyzer.analyze_pending_actions(
303
+ action_events
304
+ )
305
+ ]
306
+ else:
307
+ risks = [risk.SecurityRisk.UNKNOWN] * len(action_events)
308
+
309
+ # Grab the confirmation policy from the state and pass in the risks.
310
+ if any(state.confirmation_policy.should_confirm(risk) for risk in risks):
311
+ state.execution_status = (
312
+ ConversationExecutionStatus.WAITING_FOR_CONFIRMATION
313
+ )
314
+ return True
315
+
316
+ return False
317
+
318
+ def _extract_security_risk(
319
+ self,
320
+ arguments: dict,
321
+ tool_name: str,
322
+ read_only_tool: bool,
323
+ security_analyzer: analyzer.SecurityAnalyzerBase | None = None,
324
+ ) -> risk.SecurityRisk:
325
+ requires_sr = isinstance(security_analyzer, LLMSecurityAnalyzer)
326
+ raw = arguments.pop("security_risk", None)
327
+
328
+ # Default risk value for action event
329
+ # Tool is marked as read-only so security risk can be ignored
330
+ if read_only_tool:
331
+ return risk.SecurityRisk.UNKNOWN
332
+
333
+ # Raises exception if failed to pass risk field when expected
334
+ # Exception will be sent back to agent as error event
335
+ # Strong models like GPT-5 can correct itself by retrying
336
+ if requires_sr and raw is None:
337
+ raise ValueError(
338
+ f"Failed to provide security_risk field in tool '{tool_name}'"
339
+ )
340
+
341
+ # When using weaker models without security analyzer
342
+ # safely ignore missing security risk fields
343
+ if not requires_sr and raw is None:
344
+ return risk.SecurityRisk.UNKNOWN
345
+
346
+ # Raises exception if invalid risk enum passed by LLM
347
+ security_risk = risk.SecurityRisk(raw)
348
+ return security_risk
349
+
350
    def _get_action_event(
        self,
        tool_call: MessageToolCall,
        llm_response_id: str,
        on_event: ConversationCallbackType,
        security_analyzer: analyzer.SecurityAnalyzerBase | None = None,
        thought: list[TextContent] | None = None,
        reasoning_content: str | None = None,
        thinking_blocks: list[ThinkingBlock | RedactedThinkingBlock] | None = None,
        responses_reasoning_item: ReasoningItemModel | None = None,
    ) -> ActionEvent | None:
        """Converts a tool call into an ActionEvent, validating arguments.

        On any failure (unknown tool, bad JSON, schema violation, bad
        security_risk) this emits an ActionEvent with ``action=None`` to keep
        the assistant/tool call-id pairing intact, followed by an
        AgentErrorEvent, and returns None.

        Args:
            tool_call: The LLM-produced tool call to validate and convert.
            llm_response_id: Id of the LLM response this call came from.
            on_event: Callback invoked with every emitted event.
            security_analyzer: Analyzer used to decide whether the
                ``security_risk`` argument is required.
            thought/reasoning_content/thinking_blocks/responses_reasoning_item:
                Reasoning artifacts attached (by the caller) only to the first
                action of a multi-call response.

        Returns:
            The emitted ActionEvent, or None when validation failed.

        NOTE: state will be mutated in-place.
        """
        tool_name = tool_call.name
        tool = self.tools_map.get(tool_name, None)
        # Handle non-existing tools
        if tool is None:
            available = list(self.tools_map.keys())
            err = f"Tool '{tool_name}' not found. Available: {available}"
            logger.error(err)
            # Persist assistant function_call so next turn has matching call_id
            tc_event = ActionEvent(
                source="agent",
                thought=thought or [],
                reasoning_content=reasoning_content,
                thinking_blocks=thinking_blocks or [],
                responses_reasoning_item=responses_reasoning_item,
                tool_call=tool_call,
                tool_name=tool_call.name,
                tool_call_id=tool_call.id,
                llm_response_id=llm_response_id,
                action=None,
            )
            on_event(tc_event)
            event = AgentErrorEvent(
                error=err,
                tool_name=tool_name,
                tool_call_id=tool_call.id,
            )
            on_event(event)
            return

        # Validate arguments
        security_risk: risk.SecurityRisk = risk.SecurityRisk.UNKNOWN
        try:
            arguments = json.loads(tool_call.arguments)

            # Fix malformed arguments (e.g., JSON strings for list/dict fields)
            arguments = fix_malformed_tool_arguments(arguments, tool.action_type)
            # _extract_security_risk pops "security_risk" from arguments and
            # may raise ValueError (caught below, reported to the agent).
            security_risk = self._extract_security_risk(
                arguments,
                tool.name,
                tool.annotations.readOnlyHint if tool.annotations else False,
                security_analyzer,
            )
            assert "security_risk" not in arguments, (
                "Unexpected 'security_risk' key found in tool arguments"
            )

            action: Action = tool.action_from_arguments(arguments)
        except (json.JSONDecodeError, ValidationError, ValueError) as e:
            err = (
                f"Error validating args {tool_call.arguments} for tool "
                f"'{tool.name}': {e}"
            )
            # Persist assistant function_call so next turn has matching call_id
            tc_event = ActionEvent(
                source="agent",
                thought=thought or [],
                reasoning_content=reasoning_content,
                thinking_blocks=thinking_blocks or [],
                responses_reasoning_item=responses_reasoning_item,
                tool_call=tool_call,
                tool_name=tool_call.name,
                tool_call_id=tool_call.id,
                llm_response_id=llm_response_id,
                action=None,
            )
            on_event(tc_event)
            event = AgentErrorEvent(
                error=err,
                tool_name=tool_name,
                tool_call_id=tool_call.id,
            )
            on_event(event)
            return

        # Success: emit the fully-validated action event.
        action_event = ActionEvent(
            action=action,
            thought=thought or [],
            reasoning_content=reasoning_content,
            thinking_blocks=thinking_blocks or [],
            responses_reasoning_item=responses_reasoning_item,
            tool_name=tool.name,
            tool_call_id=tool_call.id,
            tool_call=tool_call,
            llm_response_id=llm_response_id,
            security_risk=security_risk,
        )
        on_event(action_event)
        return action_event
453
+
454
    @observe(ignore_inputs=["state", "on_event"])
    def _execute_action_event(
        self,
        conversation: LocalConversation,
        action_event: ActionEvent,
        on_event: ConversationCallbackType,
    ):
        """Execute an action event and update the conversation state.

        It will call the tool's executor and update the state & call callback fn
        with the observation.

        Raises:
            RuntimeError: if the tool named by the event is not registered
                (callers validate this earlier, so this indicates a bug).
        """
        state = conversation.state
        tool = self.tools_map.get(action_event.tool_name, None)
        if tool is None:
            raise RuntimeError(
                f"Tool '{action_event.tool_name}' not found. This should not happen "
                "as it was checked earlier."
            )

        # Execute actions!
        # When observability is on, wrap the tool invocation in a TOOL span.
        if should_enable_observability():
            tool_name = extract_action_name(action_event)
            observation: Observation = observe(name=tool_name, span_type="TOOL")(tool)(
                action_event.action, conversation
            )
        else:
            observation = tool(action_event.action, conversation)
        assert isinstance(observation, Observation), (
            f"Tool '{tool.name}' executor must return an Observation"
        )

        obs_event = ObservationEvent(
            observation=observation,
            action_id=action_event.id,
            tool_name=tool.name,
            tool_call_id=action_event.tool_call.id,
        )
        on_event(obs_event)

        # Set conversation state
        if tool.name == FinishTool.name:
            state.execution_status = ConversationExecutionStatus.FINISHED
        return obs_event
498
+
499
+ def _maybe_emit_vllm_tokens(
500
+ self, llm_response: LLMResponse, on_event: ConversationCallbackType
501
+ ) -> None:
502
+ if (
503
+ "return_token_ids" in self.llm.litellm_extra_body
504
+ ) and self.llm.litellm_extra_body["return_token_ids"]:
505
+ token_event = TokenEvent(
506
+ source="agent",
507
+ prompt_token_ids=llm_response.raw_response["prompt_token_ids"],
508
+ response_token_ids=llm_response.raw_response["choices"][0][
509
+ "provider_specific_fields"
510
+ ]["token_ids"],
511
+ )
512
+ on_event(token_event)
513
+
514
+ def _log_context_window_exceeded_warning(self) -> None:
515
+ """Log a helpful warning when context window is exceeded without a condenser."""
516
+ if self.condenser is None:
517
+ logger.warning(
518
+ "\n"
519
+ "=" * 80 + "\n"
520
+ "⚠️ CONTEXT WINDOW EXCEEDED ERROR\n"
521
+ "=" * 80 + "\n"
522
+ "\n"
523
+ "The LLM's context window has been exceeded, but no condenser is "
524
+ "configured.\n"
525
+ "\n"
526
+ "Current configuration:\n"
527
+ f" • Condenser: None\n"
528
+ f" • LLM Model: {self.llm.model}\n"
529
+ "\n"
530
+ "To prevent this error, configure a condenser to automatically "
531
+ "summarize\n"
532
+ "conversation history when it gets too long.\n"
533
+ "\n"
534
+ "Example configuration:\n"
535
+ "\n"
536
+ " from openhands.sdk import Agent, LLM\n"
537
+ " from openhands.sdk.context.condenser import "
538
+ "LLMSummarizingCondenser\n"
539
+ "\n"
540
+ " agent = Agent(\n"
541
+ " llm=LLM(model='your-model'),\n"
542
+ " condenser=LLMSummarizingCondenser(\n"
543
+ " llm=LLM(model='your-model'), # Can use same or "
544
+ "cheaper model\n"
545
+ " max_size=120, # Maximum events before condensation\n"
546
+ " keep_first=4 # Number of initial events to preserve\n"
547
+ " )\n"
548
+ " )\n"
549
+ "\n"
550
+ "For more information, see: "
551
+ "https://docs.openhands.dev/sdk/guides/context-condenser\n"
552
+ "=" * 80
553
+ )
554
+ else:
555
+ condenser_type = type(self.condenser).__name__
556
+ handles_requests = self.condenser.handles_condensation_requests()
557
+ condenser_config = self.condenser.model_dump(
558
+ exclude={"llm"}, exclude_none=True
559
+ )
560
+ condenser_llm_obj = getattr(self.condenser, "llm", None)
561
+ condenser_llm = (
562
+ condenser_llm_obj.model if condenser_llm_obj is not None else "N/A"
563
+ )
564
+
565
+ logger.warning(
566
+ "\n"
567
+ "=" * 80 + "\n"
568
+ "⚠️ CONTEXT WINDOW EXCEEDED ERROR\n"
569
+ "=" * 80 + "\n"
570
+ "\n"
571
+ "The LLM's context window has been exceeded.\n"
572
+ "\n"
573
+ "Current configuration:\n"
574
+ f" • Condenser Type: {condenser_type}\n"
575
+ f" • Handles Condensation Requests: {handles_requests}\n"
576
+ f" • Condenser LLM: {condenser_llm}\n"
577
+ f" • Agent LLM Model: {self.llm.model}\n"
578
+ f" • Condenser Config: {json.dumps(condenser_config, indent=4)}\n"
579
+ "\n"
580
+ "Your condenser is configured but does not handle condensation "
581
+ "requests\n"
582
+ "(handles_condensation_requests() returned False).\n"
583
+ "\n"
584
+ "To fix this:\n"
585
+ " 1. Use LLMSummarizingCondenser which handles condensation "
586
+ "requests, OR\n"
587
+ " 2. Implement handles_condensation_requests() in your custom "
588
+ "condenser\n"
589
+ "\n"
590
+ "Example with LLMSummarizingCondenser:\n"
591
+ "\n"
592
+ " from openhands.sdk.context.condenser import "
593
+ "LLMSummarizingCondenser\n"
594
+ "\n"
595
+ " agent = Agent(\n"
596
+ " llm=LLM(model='your-model'),\n"
597
+ " condenser=LLMSummarizingCondenser(\n"
598
+ " llm=LLM(model='your-model'),\n"
599
+ " max_size=120,\n"
600
+ " keep_first=4\n"
601
+ " )\n"
602
+ " )\n"
603
+ "\n"
604
+ "For more information, see: "
605
+ "https://docs.openhands.dev/sdk/guides/context-condenser\n"
606
+ "=" * 80
607
+ )