openhands 0.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of openhands might be problematic; see the release advisory for more details.

Files changed (124)
  1. openhands-1.0.1.dist-info/METADATA +52 -0
  2. openhands-1.0.1.dist-info/RECORD +31 -0
  3. {openhands-0.0.0.dist-info → openhands-1.0.1.dist-info}/WHEEL +1 -2
  4. openhands-1.0.1.dist-info/entry_points.txt +2 -0
  5. openhands_cli/__init__.py +8 -0
  6. openhands_cli/agent_chat.py +186 -0
  7. openhands_cli/argparsers/main_parser.py +56 -0
  8. openhands_cli/argparsers/serve_parser.py +31 -0
  9. openhands_cli/gui_launcher.py +220 -0
  10. openhands_cli/listeners/__init__.py +4 -0
  11. openhands_cli/listeners/loading_listener.py +63 -0
  12. openhands_cli/listeners/pause_listener.py +83 -0
  13. openhands_cli/llm_utils.py +57 -0
  14. openhands_cli/locations.py +13 -0
  15. openhands_cli/pt_style.py +30 -0
  16. openhands_cli/runner.py +178 -0
  17. openhands_cli/setup.py +116 -0
  18. openhands_cli/simple_main.py +59 -0
  19. openhands_cli/tui/__init__.py +5 -0
  20. openhands_cli/tui/settings/mcp_screen.py +217 -0
  21. openhands_cli/tui/settings/settings_screen.py +202 -0
  22. openhands_cli/tui/settings/store.py +93 -0
  23. openhands_cli/tui/status.py +109 -0
  24. openhands_cli/tui/tui.py +100 -0
  25. openhands_cli/tui/utils.py +14 -0
  26. openhands_cli/user_actions/__init__.py +17 -0
  27. openhands_cli/user_actions/agent_action.py +95 -0
  28. openhands_cli/user_actions/exit_session.py +18 -0
  29. openhands_cli/user_actions/settings_action.py +171 -0
  30. openhands_cli/user_actions/types.py +18 -0
  31. openhands_cli/user_actions/utils.py +199 -0
  32. openhands/__init__.py +0 -1
  33. openhands/sdk/__init__.py +0 -45
  34. openhands/sdk/agent/__init__.py +0 -8
  35. openhands/sdk/agent/agent/__init__.py +0 -6
  36. openhands/sdk/agent/agent/agent.py +0 -349
  37. openhands/sdk/agent/base.py +0 -103
  38. openhands/sdk/context/__init__.py +0 -28
  39. openhands/sdk/context/agent_context.py +0 -153
  40. openhands/sdk/context/condenser/__init__.py +0 -5
  41. openhands/sdk/context/condenser/condenser.py +0 -73
  42. openhands/sdk/context/condenser/no_op_condenser.py +0 -13
  43. openhands/sdk/context/manager.py +0 -5
  44. openhands/sdk/context/microagents/__init__.py +0 -26
  45. openhands/sdk/context/microagents/exceptions.py +0 -11
  46. openhands/sdk/context/microagents/microagent.py +0 -345
  47. openhands/sdk/context/microagents/types.py +0 -70
  48. openhands/sdk/context/utils/__init__.py +0 -8
  49. openhands/sdk/context/utils/prompt.py +0 -52
  50. openhands/sdk/context/view.py +0 -116
  51. openhands/sdk/conversation/__init__.py +0 -12
  52. openhands/sdk/conversation/conversation.py +0 -207
  53. openhands/sdk/conversation/state.py +0 -50
  54. openhands/sdk/conversation/types.py +0 -6
  55. openhands/sdk/conversation/visualizer.py +0 -300
  56. openhands/sdk/event/__init__.py +0 -27
  57. openhands/sdk/event/base.py +0 -148
  58. openhands/sdk/event/condenser.py +0 -49
  59. openhands/sdk/event/llm_convertible.py +0 -265
  60. openhands/sdk/event/types.py +0 -5
  61. openhands/sdk/event/user_action.py +0 -12
  62. openhands/sdk/event/utils.py +0 -30
  63. openhands/sdk/llm/__init__.py +0 -19
  64. openhands/sdk/llm/exceptions.py +0 -108
  65. openhands/sdk/llm/llm.py +0 -867
  66. openhands/sdk/llm/llm_registry.py +0 -116
  67. openhands/sdk/llm/message.py +0 -216
  68. openhands/sdk/llm/metadata.py +0 -34
  69. openhands/sdk/llm/utils/fn_call_converter.py +0 -1049
  70. openhands/sdk/llm/utils/metrics.py +0 -311
  71. openhands/sdk/llm/utils/model_features.py +0 -153
  72. openhands/sdk/llm/utils/retry_mixin.py +0 -122
  73. openhands/sdk/llm/utils/telemetry.py +0 -252
  74. openhands/sdk/logger.py +0 -167
  75. openhands/sdk/mcp/__init__.py +0 -20
  76. openhands/sdk/mcp/client.py +0 -113
  77. openhands/sdk/mcp/definition.py +0 -69
  78. openhands/sdk/mcp/tool.py +0 -104
  79. openhands/sdk/mcp/utils.py +0 -59
  80. openhands/sdk/tests/llm/test_llm.py +0 -447
  81. openhands/sdk/tests/llm/test_llm_fncall_converter.py +0 -691
  82. openhands/sdk/tests/llm/test_model_features.py +0 -221
  83. openhands/sdk/tool/__init__.py +0 -30
  84. openhands/sdk/tool/builtins/__init__.py +0 -34
  85. openhands/sdk/tool/builtins/finish.py +0 -57
  86. openhands/sdk/tool/builtins/think.py +0 -60
  87. openhands/sdk/tool/schema.py +0 -236
  88. openhands/sdk/tool/security_prompt.py +0 -5
  89. openhands/sdk/tool/tool.py +0 -142
  90. openhands/sdk/utils/__init__.py +0 -14
  91. openhands/sdk/utils/discriminated_union.py +0 -210
  92. openhands/sdk/utils/json.py +0 -48
  93. openhands/sdk/utils/truncate.py +0 -44
  94. openhands/tools/__init__.py +0 -44
  95. openhands/tools/execute_bash/__init__.py +0 -30
  96. openhands/tools/execute_bash/constants.py +0 -31
  97. openhands/tools/execute_bash/definition.py +0 -166
  98. openhands/tools/execute_bash/impl.py +0 -38
  99. openhands/tools/execute_bash/metadata.py +0 -101
  100. openhands/tools/execute_bash/terminal/__init__.py +0 -22
  101. openhands/tools/execute_bash/terminal/factory.py +0 -113
  102. openhands/tools/execute_bash/terminal/interface.py +0 -189
  103. openhands/tools/execute_bash/terminal/subprocess_terminal.py +0 -412
  104. openhands/tools/execute_bash/terminal/terminal_session.py +0 -492
  105. openhands/tools/execute_bash/terminal/tmux_terminal.py +0 -160
  106. openhands/tools/execute_bash/utils/command.py +0 -150
  107. openhands/tools/str_replace_editor/__init__.py +0 -17
  108. openhands/tools/str_replace_editor/definition.py +0 -158
  109. openhands/tools/str_replace_editor/editor.py +0 -683
  110. openhands/tools/str_replace_editor/exceptions.py +0 -41
  111. openhands/tools/str_replace_editor/impl.py +0 -66
  112. openhands/tools/str_replace_editor/utils/__init__.py +0 -0
  113. openhands/tools/str_replace_editor/utils/config.py +0 -2
  114. openhands/tools/str_replace_editor/utils/constants.py +0 -9
  115. openhands/tools/str_replace_editor/utils/encoding.py +0 -135
  116. openhands/tools/str_replace_editor/utils/file_cache.py +0 -154
  117. openhands/tools/str_replace_editor/utils/history.py +0 -122
  118. openhands/tools/str_replace_editor/utils/shell.py +0 -72
  119. openhands/tools/task_tracker/__init__.py +0 -16
  120. openhands/tools/task_tracker/definition.py +0 -336
  121. openhands/tools/utils/__init__.py +0 -1
  122. openhands-0.0.0.dist-info/METADATA +0 -3
  123. openhands-0.0.0.dist-info/RECORD +0 -94
  124. openhands-0.0.0.dist-info/top_level.txt +0 -1
@@ -1,27 +0,0 @@
1
- from openhands.sdk.event.base import Event, EventBase, LLMConvertibleEvent
2
- from openhands.sdk.event.condenser import Condensation, CondensationRequest
3
- from openhands.sdk.event.llm_convertible import (
4
- ActionEvent,
5
- AgentErrorEvent,
6
- MessageEvent,
7
- ObservationEvent,
8
- SystemPromptEvent,
9
- UserRejectObservation,
10
- )
11
- from openhands.sdk.event.user_action import PauseEvent
12
-
13
-
14
- __all__ = [
15
- "EventBase",
16
- "LLMConvertibleEvent",
17
- "SystemPromptEvent",
18
- "ActionEvent",
19
- "ObservationEvent",
20
- "MessageEvent",
21
- "AgentErrorEvent",
22
- "UserRejectObservation",
23
- "PauseEvent",
24
- "Event",
25
- "Condensation",
26
- "CondensationRequest",
27
- ]
@@ -1,148 +0,0 @@
1
- import uuid
2
- from abc import ABC, abstractmethod
3
- from datetime import datetime
4
- from typing import TYPE_CHECKING, Annotated, cast
5
-
6
- from pydantic import BaseModel, ConfigDict, Field
7
-
8
- from openhands.sdk.event.types import SourceType
9
- from openhands.sdk.llm import ImageContent, Message, TextContent
10
- from openhands.sdk.utils.discriminated_union import (
11
- DiscriminatedUnionMixin,
12
- DiscriminatedUnionType,
13
- )
14
-
15
-
16
- if TYPE_CHECKING:
17
- from openhands.sdk.event.llm_convertible import ActionEvent
18
-
19
# Maximum number of characters of content shown in __str__ previews
# before truncation with "...".
N_CHAR_PREVIEW = 500
20
-
21
-
22
class EventBase(DiscriminatedUnionMixin, BaseModel, ABC):
    """Base class for all events.

    Every event carries a unique ``id``, a creation ``timestamp`` and the
    ``source`` that produced it. Inheriting DiscriminatedUnionMixin makes
    concrete subclasses polymorphically deserializable when a field is
    annotated as ``Event``.
    """

    # Reject unknown fields so serialized events round-trip exactly.
    model_config = ConfigDict(extra="forbid")
    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique event id (ULID/UUID)",
    )
    # NOTE(review): timestamp is naive local time (no tzinfo) — confirm
    # whether UTC is expected by downstream consumers.
    timestamp: str = Field(
        default_factory=lambda: datetime.now().isoformat(),
        description="Event timestamp",
    )  # consistent with V1
    source: SourceType = Field(..., description="The source of this event")

    def __str__(self) -> str:
        """Plain text string representation for display."""
        return f"{self.__class__.__name__} ({self.source})"

    def __repr__(self) -> str:
        """Developer-friendly representation (id shortened to 8 chars)."""
        return (
            f"{self.__class__.__name__}(id='{self.id[:8]}...', "
            f"source='{self.source}', timestamp='{self.timestamp}')"
        )
46
-
47
-
48
Event = Annotated[EventBase, DiscriminatedUnionType[EventBase]]
"""Type annotation for values that can be any implementation of EventBase.

In most situations this is equivalent to EventBase. However, when used as a
field annotation in a Pydantic BaseModel, it enables polymorphic
deserialization by delaying discriminator resolution until runtime.
"""
55
-
56
-
57
class LLMConvertibleEvent(EventBase, ABC):
    """Base class for events that can be converted to LLM messages."""

    @abstractmethod
    def to_llm_message(self) -> Message:
        """Convert this single event into one LLM ``Message``."""
        raise NotImplementedError()

    def __str__(self) -> str:
        """Plain text string representation showing LLM message content."""
        base_str = super().__str__()
        try:
            llm_message = self.to_llm_message()
            # Extract text content from the message
            text_parts = []
            for content in llm_message.content:
                if isinstance(content, TextContent):
                    text_parts.append(content.text)
                elif isinstance(content, ImageContent):
                    text_parts.append(f"[Image: {len(content.image_urls)} URLs]")

            if text_parts:
                content_preview = " ".join(text_parts)
                # Truncate long content for display
                if len(content_preview) > N_CHAR_PREVIEW:
                    content_preview = content_preview[: N_CHAR_PREVIEW - 3] + "..."
                return f"{base_str}\n {llm_message.role}: {content_preview}"
            else:
                return f"{base_str}\n {llm_message.role}: [no text content]"
        except Exception:
            # Fallback to base representation if LLM message conversion fails
            return base_str

    @staticmethod
    def events_to_messages(events: list["LLMConvertibleEvent"]) -> list[Message]:
        """Convert event stream to LLM message stream, handling multi-action batches.

        Consecutive ActionEvents sharing one ``llm_response_id`` (parallel
        function calling) are merged back into a single assistant message.
        """
        # TODO: We should add extensive tests for this
        from openhands.sdk.event.llm_convertible import ActionEvent

        messages = []
        i = 0

        while i < len(events):
            event = events[i]

            if isinstance(event, ActionEvent):
                # Collect all ActionEvents from the same LLM response
                # (produced by parallel function calling).
                batch_events: list[ActionEvent] = [event]
                response_id = event.llm_response_id

                # Look ahead for related events
                j = i + 1
                while (
                    j < len(events)
                    and isinstance(events[j], ActionEvent)
                    and cast(ActionEvent, events[j]).llm_response_id == response_id
                ):
                    batch_events.append(cast(ActionEvent, events[j]))
                    j += 1

                # Create combined message for the response
                messages.append(_combine_action_events(batch_events))
                i = j
            else:
                # Regular event - direct conversion
                messages.append(event.to_llm_message())
                i += 1

        return messages
126
-
127
-
128
def _combine_action_events(events: list["ActionEvent"]) -> Message:
    """Combine multiple ActionEvents into a single LLM message.

    We receive multiple ActionEvents per LLM message WHEN the LLM returns
    multiple tool calls via parallel function calling. The shared thought is
    carried only by the first event of the batch.
    """
    if len(events) == 1:
        return events[0].to_llm_message()
    # Multi-action case - reconstruct original LLM response
    # NOTE(review): `assert` is stripped under `python -O`; consider raising
    # an explicit error if this invariant must hold in production.
    for e in events[1:]:
        assert len(e.thought) == 0, (
            "Expected empty thought for multi-action events after the first one"
        )

    return Message(
        role="assistant",
        content=cast(
            list[TextContent | ImageContent], events[0].thought
        ),  # Shared thought content only in the first event
        tool_calls=[event.tool_call for event in events],
    )
@@ -1,49 +0,0 @@
1
- from openhands.sdk.event.base import EventBase
2
- from openhands.sdk.event.types import SourceType
3
-
4
-
5
class Condensation(EventBase):
    """Signals that a condensation of the conversation history is happening."""

    forgotten_event_ids: list[str] | None = None
    """The IDs of the events that are being forgotten (removed from the `View` given to
    the LLM).
    """

    summary: str | None = None
    """An optional summary of the events being forgotten."""

    summary_offset: int | None = None
    """An optional offset to the start of the resulting view indicating where the
    summary should be inserted.
    """

    source: SourceType = "environment"

    @property
    def forgotten(self) -> list[str]:
        """Event IDs to drop; empty when none were recorded."""
        if self.forgotten_event_ids is None:
            return []
        return self.forgotten_event_ids

    @property
    def message(self) -> str:
        """Human-readable description of this condensation."""
        if self.summary:
            return f"Summary: {self.summary}"
        return f"Condenser is dropping the events: {self.forgotten}."
36
-
37
-
38
class CondensationRequest(EventBase):
    """Event asking the condenser to shrink the conversation history."""

    source: SourceType = "environment"

    @property
    def message(self) -> str:
        """Human-readable description of the request."""
        return "Requesting a condensation of the conversation history."
@@ -1,265 +0,0 @@
1
- import copy
2
- from typing import cast
3
-
4
- from litellm import ChatCompletionMessageToolCall, ChatCompletionToolParam
5
- from pydantic import ConfigDict, Field, computed_field
6
-
7
- from openhands.sdk.event.base import N_CHAR_PREVIEW, LLMConvertibleEvent
8
- from openhands.sdk.event.types import SourceType
9
- from openhands.sdk.llm import ImageContent, Message, TextContent, content_to_str
10
- from openhands.sdk.llm.utils.metrics import MetricsSnapshot
11
- from openhands.sdk.tool import Action, Observation
12
-
13
-
14
class SystemPromptEvent(LLMConvertibleEvent):
    """System prompt added by the agent, together with the available tools."""

    source: SourceType = "agent"
    system_prompt: TextContent = Field(..., description="The system prompt text")
    tools: list[ChatCompletionToolParam] = Field(
        ..., description="List of tools in OpenAI tool format"
    )

    def to_llm_message(self) -> Message:
        """Render as a `system` role message containing only the prompt."""
        return Message(role="system", content=[self.system_prompt])

    def __str__(self) -> str:
        """Short preview: truncated prompt text plus the tool count."""
        header = f"{self.__class__.__name__} ({self.source})"
        text = self.system_prompt.text
        preview = text if len(text) <= N_CHAR_PREVIEW else text[:N_CHAR_PREVIEW] + "..."
        return f"{header}\n System: {preview}\n Tools: {len(self.tools)} available"
38
-
39
-
40
class ActionEvent(LLMConvertibleEvent):
    """A single tool call (action) chosen by the LLM, plus the thought behind it.

    Several ActionEvents may share one ``llm_response_id`` when the LLM used
    parallel function calling; only the first carries the shared ``thought``
    and only the last carries ``metrics``.
    """

    source: SourceType = "agent"
    thought: list[TextContent] = Field(
        ..., description="The thought process of the agent before taking this action"
    )
    reasoning_content: str | None = Field(
        default=None,
        description="Intermediate reasoning/thinking content from reasoning models",
    )
    action: Action = Field(..., description="Single action (tool call) returned by LLM")
    tool_name: str = Field(..., description="The name of the tool being called")
    tool_call_id: str = Field(
        ..., description="The unique id returned by LLM API for this tool call"
    )
    tool_call: ChatCompletionMessageToolCall = Field(
        ...,
        description=(
            "The tool call received from the LLM response. We keep a copy of it "
            "so it is easier to construct it into LLM message"
        ),
    )
    llm_response_id: str = Field(
        ...,
        description=(
            "Groups related actions from same LLM response. This helps in tracking "
            "and managing results of parallel function calling from the same LLM "
            "response."
        ),
    )
    metrics: MetricsSnapshot | None = Field(
        default=None,
        description=(
            "Snapshot of LLM metrics (token counts and costs). Only attached "
            "to the last action when multiple actions share the same LLM response."
        ),
    )

    def to_llm_message(self) -> Message:
        """Individual message - may be incomplete for multi-action batches.

        For batches, use ``LLMConvertibleEvent.events_to_messages`` so sibling
        tool calls are merged into one assistant message.
        """
        content: list[TextContent | ImageContent] = cast(
            list[TextContent | ImageContent], self.thought
        )
        return Message(
            role="assistant",
            content=content,
            tool_calls=[self.tool_call],
            reasoning_content=self.reasoning_content,
        )

    def __str__(self) -> str:
        """Plain text string representation for ActionEvent."""
        base_str = f"{self.__class__.__name__} ({self.source})"
        thought_text = " ".join([t.text for t in self.thought])
        # Truncate long thoughts for display.
        thought_preview = (
            thought_text[:N_CHAR_PREVIEW] + "..."
            if len(thought_text) > N_CHAR_PREVIEW
            else thought_text
        )
        action_name = self.action.__class__.__name__
        return f"{base_str}\n Thought: {thought_preview}\n Action: {action_name}"
100
-
101
-
102
class ObservationEvent(LLMConvertibleEvent):
    """Result of executing a tool call, fed back to the LLM as a tool message."""

    source: SourceType = "environment"
    observation: Observation = Field(
        ..., description="The observation (tool call) sent to LLM"
    )

    action_id: str = Field(
        ..., description="The action id that this observation is responding to"
    )
    tool_name: str = Field(
        ..., description="The tool name that this observation is responding to"
    )
    tool_call_id: str = Field(
        ..., description="The tool call id that this observation is responding to"
    )

    def to_llm_message(self) -> Message:
        """Render as a `tool` role message tied to the originating tool call."""
        return Message(
            role="tool",
            content=self.observation.agent_observation,
            name=self.tool_name,
            tool_call_id=self.tool_call_id,
        )

    def __str__(self) -> str:
        """Short preview: tool name plus truncated observation text."""
        header = f"{self.__class__.__name__} ({self.source})"
        text = "".join(content_to_str(self.observation.agent_observation))
        preview = text if len(text) <= N_CHAR_PREVIEW else text[:N_CHAR_PREVIEW] + "..."
        return f"{header}\n Tool: {self.tool_name}\n Result: {preview}"
136
-
137
-
138
class MessageEvent(LLMConvertibleEvent):
    """Message from either agent or user.

    Originally the "MessageAction"; unlike an ActionEvent it is not supposed
    to be a tool call.
    """

    model_config = ConfigDict(extra="ignore")

    source: SourceType
    llm_message: Message = Field(
        ..., description="The exact LLM message for this message event"
    )
    metrics: MetricsSnapshot | None = Field(
        default=None,
        description=(
            "Snapshot of LLM metrics (token counts and costs) for this message. "
            "Only attached to messages from agent."
        ),
    )

    # context extensions stuff / microagent can go here
    activated_microagents: list[str] = Field(
        default_factory=list, description="List of activated microagent name"
    )
    extended_content: list[TextContent] = Field(
        default_factory=list, description="List of content added by agent context"
    )

    @computed_field
    def reasoning_content(self) -> str:
        # Expose the wrapped message's reasoning content in serialized output,
        # normalizing None to "".
        return self.llm_message.reasoning_content or ""

    def to_llm_message(self) -> Message:
        # Deep-copy so appending extended_content never mutates llm_message.
        msg = copy.deepcopy(self.llm_message)
        msg.content.extend(self.extended_content)
        return msg

    def __str__(self) -> str:
        """Plain text string representation for MessageEvent."""
        base_str = f"{self.__class__.__name__} ({self.source})"
        # Extract text content from the message
        text_parts = []
        message = self.to_llm_message()
        for content in message.content:
            if isinstance(content, TextContent):
                text_parts.append(content.text)
            elif isinstance(content, ImageContent):
                text_parts.append(f"[Image: {len(content.image_urls)} URLs]")

        if text_parts:
            content_preview = " ".join(text_parts)
            # Truncate long content for display
            if len(content_preview) > N_CHAR_PREVIEW:
                content_preview = content_preview[: N_CHAR_PREVIEW - 3] + "..."
            microagent_info = (
                f" [Microagents: {', '.join(self.activated_microagents)}]"
                if self.activated_microagents
                else ""
            )
            return f"{base_str}\n {message.role}: {content_preview}{microagent_info}"
        else:
            return f"{base_str}\n {message.role}: [no text content]"
198
-
199
-
200
class UserRejectObservation(LLMConvertibleEvent):
    """Observation emitted when the user rejects an action in confirmation mode."""

    source: SourceType = "user"
    action_id: str = Field(
        ..., description="The action id that this rejection is responding to"
    )
    tool_name: str = Field(
        ..., description="The tool name that this rejection is responding to"
    )
    tool_call_id: str = Field(
        ..., description="The tool call id that this rejection is responding to"
    )
    rejection_reason: str = Field(
        default="User rejected the action",
        description="Reason for rejecting the action",
    )

    def to_llm_message(self) -> Message:
        """Render as a `tool` role message reporting the rejection."""
        return Message(
            role="tool",
            content=[TextContent(text=f"Action rejected: {self.rejection_reason}")],
            name=self.tool_name,
            tool_call_id=self.tool_call_id,
        )

    def __str__(self) -> str:
        """Short preview: tool name plus truncated rejection reason."""
        header = f"{self.__class__.__name__} ({self.source})"
        reason = self.rejection_reason
        preview = (
            reason if len(reason) <= N_CHAR_PREVIEW else reason[:N_CHAR_PREVIEW] + "..."
        )
        return f"{header}\n Tool: {self.tool_name}\n Reason: {preview}"
235
-
236
-
237
class AgentErrorEvent(LLMConvertibleEvent):
    """Error produced by the agent/scaffold rather than by the model.

    Deliberately carries no model "thought" or "reasoning_content": it is not
    model output.
    """

    source: SourceType = "agent"
    error: str = Field(..., description="The error message from the scaffold")
    metrics: MetricsSnapshot | None = Field(
        default=None,
        description=(
            "Snapshot of LLM metrics (token counts and costs). Only attached "
            "to the last action when multiple actions share the same LLM response."
        ),
    )

    def to_llm_message(self) -> Message:
        """Surface the error back to the LLM as a plain user message."""
        return Message(role="user", content=[TextContent(text=self.error)])

    def __str__(self) -> str:
        """Short preview: truncated error text."""
        header = f"{self.__class__.__name__} ({self.source})"
        preview = (
            self.error
            if len(self.error) <= N_CHAR_PREVIEW
            else self.error[:N_CHAR_PREVIEW] + "..."
        )
        return f"{header}\n Error: {preview}"
@@ -1,5 +0,0 @@
1
- from typing import Literal
2
-
3
-
4
# Closed vocabularies shared across the event system.
# EventType: the kind of event; SourceType: who/what produced it.
EventType = Literal["action", "observation", "message", "system_prompt", "agent_error"]
SourceType = Literal["agent", "user", "environment"]
@@ -1,12 +0,0 @@
1
- from openhands.sdk.event.base import EventBase
2
- from openhands.sdk.event.types import SourceType
3
-
4
-
5
class PauseEvent(EventBase):
    """Emitted when agent execution is paused by user request."""

    source: SourceType = "user"

    def __str__(self) -> str:
        """Plain text representation."""
        return f"{type(self).__name__} ({self.source}): Agent execution paused"
@@ -1,30 +0,0 @@
1
- """Utility functions for event processing."""
2
-
3
- from openhands.sdk.event import ActionEvent, ObservationEvent, UserRejectObservation
4
-
5
-
6
def get_unmatched_actions(events: list) -> list[ActionEvent]:
    """Find actions in the event history that don't have matching observations.

    Scans in reverse chronological order since recent actions are the most
    likely to be unmatched (e.g. pending confirmation). Assumes observations
    appear after their action in the list — TODO confirm this invariant holds
    for all event logs.

    Args:
        events: List of events to search through.

    Returns:
        ActionEvent objects without a corresponding observation, in
        chronological order.
    """
    observed_action_ids: set[str] = set()
    unmatched_actions: list[ActionEvent] = []

    # Walk backwards: by the time an ActionEvent is reached, every later
    # observation (including its own, if any) has already been recorded.
    for event in reversed(events):
        if isinstance(event, (ObservationEvent, UserRejectObservation)):
            observed_action_ids.add(event.action_id)
        elif isinstance(event, ActionEvent) and event.id not in observed_action_ids:
            unmatched_actions.append(event)

    # Collected newest-first; one O(n) reverse restores chronological order
    # instead of the original insert(0, ...) per match, which was O(n^2).
    unmatched_actions.reverse()
    return unmatched_actions
@@ -1,19 +0,0 @@
1
- from openhands.sdk.llm.llm import LLM
2
- from openhands.sdk.llm.llm_registry import LLMRegistry, RegistryEvent
3
- from openhands.sdk.llm.message import ImageContent, Message, TextContent, content_to_str
4
- from openhands.sdk.llm.metadata import get_llm_metadata
5
- from openhands.sdk.llm.utils.metrics import Metrics, MetricsSnapshot
6
-
7
-
8
- __all__ = [
9
- "LLM",
10
- "LLMRegistry",
11
- "RegistryEvent",
12
- "Message",
13
- "TextContent",
14
- "ImageContent",
15
- "content_to_str",
16
- "get_llm_metadata",
17
- "Metrics",
18
- "MetricsSnapshot",
19
- ]
@@ -1,108 +0,0 @@
1
class LLMError(Exception):
    """Root of the LLM exception hierarchy.

    Stores the human-readable message on ``self.message`` and renders it
    verbatim via ``str()``.
    """

    def __init__(self, message: str) -> None:
        super().__init__(message)
        self.message = message

    def __str__(self) -> str:
        return self.message
10
-
11
-
12
class LLMMalformedActionError(LLMError):
    """Raised when the LLM response is malformed or does not match the expected format."""  # noqa: E501

    def __init__(self, message: str = "Malformed response") -> None:
        super().__init__(message)
17
-
18
-
19
class LLMNoActionError(LLMError):
    """Raised when the LLM response does not include an action."""

    def __init__(self, message: str = "Agent must return an action") -> None:
        super().__init__(message)
24
-
25
-
26
class LLMResponseError(LLMError):
    """Raised when no action (or an action of the wrong type) can be retrieved from the LLM response."""  # noqa: E501

    def __init__(
        self, message: str = "Failed to retrieve action from LLM response"
    ) -> None:
        super().__init__(message)
33
-
34
-
35
class LLMNoResponseError(LLMError):
    """Raised when the LLM returns no response at all.

    Observed so far only with Gemini models. This exception should be
    retried; retrying with a non-zero temperature usually yields a response.
    """

    def __init__(
        self,
        message: str = "LLM did not return a response. This is only seen in Gemini models so far.",  # noqa: E501
    ) -> None:
        super().__init__(message)
48
-
49
-
50
class LLMContextWindowExceedError(LLMError):
    """Raised when the conversation no longer fits in the model's context window."""

    def __init__(
        self,
        message: str = "Conversation history longer than LLM context window limit. Consider turning on enable_history_truncation config to avoid this error",  # noqa: E501
    ) -> None:
        super().__init__(message)
56
-
57
-
58
- # ============================================
59
- # LLM function calling Exceptions
60
- # ============================================
61
-
62
-
63
class FunctionCallConversionError(LLMError):
    """Raised when converting a non-function-call message to a function call fails.

    Typically caused by a malformed message (e.g. missing <function=...>
    tags), not by LLM output itself.
    """

    def __init__(self, message: str) -> None:
        super().__init__(message)
73
-
74
-
75
class FunctionCallValidationError(LLMError):
    """Raised when a function call message fails validation.

    Typically the LLM produced an unrecognized function call, parameter name,
    or parameter value.
    """

    def __init__(self, message: str) -> None:
        super().__init__(message)
85
-
86
-
87
class FunctionCallNotExistsError(LLMError):
    """Raised when the LLM calls a tool that is not registered."""

    def __init__(self, message: str) -> None:
        super().__init__(message)
92
-
93
-
94
- # ============================================
95
- # Other Exceptions
96
- # ============================================
97
-
98
-
99
class UserCancelledError(Exception):
    """Raised when the user cancels an in-flight request."""

    def __init__(self, message: str = "User cancelled the request") -> None:
        super().__init__(message)
102
-
103
-
104
class OperationCancelled(Exception):
    """Raised when an operation is cancelled (e.g. by a keyboard interrupt)."""

    def __init__(self, message: str = "Operation was cancelled") -> None:
        super().__init__(message)