openhands-sdk 1.7.3__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- openhands/sdk/__init__.py +111 -0
- openhands/sdk/agent/__init__.py +8 -0
- openhands/sdk/agent/agent.py +650 -0
- openhands/sdk/agent/base.py +457 -0
- openhands/sdk/agent/prompts/in_context_learning_example.j2 +169 -0
- openhands/sdk/agent/prompts/in_context_learning_example_suffix.j2 +3 -0
- openhands/sdk/agent/prompts/model_specific/anthropic_claude.j2 +3 -0
- openhands/sdk/agent/prompts/model_specific/google_gemini.j2 +1 -0
- openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5-codex.j2 +2 -0
- openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5.j2 +3 -0
- openhands/sdk/agent/prompts/security_policy.j2 +22 -0
- openhands/sdk/agent/prompts/security_risk_assessment.j2 +21 -0
- openhands/sdk/agent/prompts/self_documentation.j2 +15 -0
- openhands/sdk/agent/prompts/system_prompt.j2 +132 -0
- openhands/sdk/agent/prompts/system_prompt_interactive.j2 +14 -0
- openhands/sdk/agent/prompts/system_prompt_long_horizon.j2 +40 -0
- openhands/sdk/agent/prompts/system_prompt_planning.j2 +40 -0
- openhands/sdk/agent/prompts/system_prompt_tech_philosophy.j2 +122 -0
- openhands/sdk/agent/utils.py +228 -0
- openhands/sdk/context/__init__.py +28 -0
- openhands/sdk/context/agent_context.py +264 -0
- openhands/sdk/context/condenser/__init__.py +18 -0
- openhands/sdk/context/condenser/base.py +100 -0
- openhands/sdk/context/condenser/llm_summarizing_condenser.py +248 -0
- openhands/sdk/context/condenser/no_op_condenser.py +14 -0
- openhands/sdk/context/condenser/pipeline_condenser.py +56 -0
- openhands/sdk/context/condenser/prompts/summarizing_prompt.j2 +59 -0
- openhands/sdk/context/condenser/utils.py +149 -0
- openhands/sdk/context/prompts/__init__.py +6 -0
- openhands/sdk/context/prompts/prompt.py +114 -0
- openhands/sdk/context/prompts/templates/ask_agent_template.j2 +11 -0
- openhands/sdk/context/prompts/templates/skill_knowledge_info.j2 +8 -0
- openhands/sdk/context/prompts/templates/system_message_suffix.j2 +32 -0
- openhands/sdk/context/skills/__init__.py +28 -0
- openhands/sdk/context/skills/exceptions.py +11 -0
- openhands/sdk/context/skills/skill.py +720 -0
- openhands/sdk/context/skills/trigger.py +36 -0
- openhands/sdk/context/skills/types.py +48 -0
- openhands/sdk/context/view.py +503 -0
- openhands/sdk/conversation/__init__.py +40 -0
- openhands/sdk/conversation/base.py +281 -0
- openhands/sdk/conversation/conversation.py +152 -0
- openhands/sdk/conversation/conversation_stats.py +85 -0
- openhands/sdk/conversation/event_store.py +157 -0
- openhands/sdk/conversation/events_list_base.py +17 -0
- openhands/sdk/conversation/exceptions.py +50 -0
- openhands/sdk/conversation/fifo_lock.py +133 -0
- openhands/sdk/conversation/impl/__init__.py +5 -0
- openhands/sdk/conversation/impl/local_conversation.py +665 -0
- openhands/sdk/conversation/impl/remote_conversation.py +956 -0
- openhands/sdk/conversation/persistence_const.py +9 -0
- openhands/sdk/conversation/response_utils.py +41 -0
- openhands/sdk/conversation/secret_registry.py +126 -0
- openhands/sdk/conversation/serialization_diff.py +0 -0
- openhands/sdk/conversation/state.py +392 -0
- openhands/sdk/conversation/stuck_detector.py +311 -0
- openhands/sdk/conversation/title_utils.py +191 -0
- openhands/sdk/conversation/types.py +45 -0
- openhands/sdk/conversation/visualizer/__init__.py +12 -0
- openhands/sdk/conversation/visualizer/base.py +67 -0
- openhands/sdk/conversation/visualizer/default.py +373 -0
- openhands/sdk/critic/__init__.py +15 -0
- openhands/sdk/critic/base.py +38 -0
- openhands/sdk/critic/impl/__init__.py +12 -0
- openhands/sdk/critic/impl/agent_finished.py +83 -0
- openhands/sdk/critic/impl/empty_patch.py +49 -0
- openhands/sdk/critic/impl/pass_critic.py +42 -0
- openhands/sdk/event/__init__.py +42 -0
- openhands/sdk/event/base.py +149 -0
- openhands/sdk/event/condenser.py +82 -0
- openhands/sdk/event/conversation_error.py +25 -0
- openhands/sdk/event/conversation_state.py +104 -0
- openhands/sdk/event/llm_completion_log.py +39 -0
- openhands/sdk/event/llm_convertible/__init__.py +20 -0
- openhands/sdk/event/llm_convertible/action.py +139 -0
- openhands/sdk/event/llm_convertible/message.py +142 -0
- openhands/sdk/event/llm_convertible/observation.py +141 -0
- openhands/sdk/event/llm_convertible/system.py +61 -0
- openhands/sdk/event/token.py +16 -0
- openhands/sdk/event/types.py +11 -0
- openhands/sdk/event/user_action.py +21 -0
- openhands/sdk/git/exceptions.py +43 -0
- openhands/sdk/git/git_changes.py +249 -0
- openhands/sdk/git/git_diff.py +129 -0
- openhands/sdk/git/models.py +21 -0
- openhands/sdk/git/utils.py +189 -0
- openhands/sdk/hooks/__init__.py +30 -0
- openhands/sdk/hooks/config.py +180 -0
- openhands/sdk/hooks/conversation_hooks.py +227 -0
- openhands/sdk/hooks/executor.py +155 -0
- openhands/sdk/hooks/manager.py +170 -0
- openhands/sdk/hooks/types.py +40 -0
- openhands/sdk/io/__init__.py +6 -0
- openhands/sdk/io/base.py +48 -0
- openhands/sdk/io/cache.py +85 -0
- openhands/sdk/io/local.py +119 -0
- openhands/sdk/io/memory.py +54 -0
- openhands/sdk/llm/__init__.py +45 -0
- openhands/sdk/llm/exceptions/__init__.py +45 -0
- openhands/sdk/llm/exceptions/classifier.py +50 -0
- openhands/sdk/llm/exceptions/mapping.py +54 -0
- openhands/sdk/llm/exceptions/types.py +101 -0
- openhands/sdk/llm/llm.py +1140 -0
- openhands/sdk/llm/llm_registry.py +122 -0
- openhands/sdk/llm/llm_response.py +59 -0
- openhands/sdk/llm/message.py +656 -0
- openhands/sdk/llm/mixins/fn_call_converter.py +1288 -0
- openhands/sdk/llm/mixins/non_native_fc.py +97 -0
- openhands/sdk/llm/options/__init__.py +1 -0
- openhands/sdk/llm/options/chat_options.py +93 -0
- openhands/sdk/llm/options/common.py +19 -0
- openhands/sdk/llm/options/responses_options.py +67 -0
- openhands/sdk/llm/router/__init__.py +10 -0
- openhands/sdk/llm/router/base.py +117 -0
- openhands/sdk/llm/router/impl/multimodal.py +76 -0
- openhands/sdk/llm/router/impl/random.py +22 -0
- openhands/sdk/llm/streaming.py +9 -0
- openhands/sdk/llm/utils/metrics.py +312 -0
- openhands/sdk/llm/utils/model_features.py +192 -0
- openhands/sdk/llm/utils/model_info.py +90 -0
- openhands/sdk/llm/utils/model_prompt_spec.py +98 -0
- openhands/sdk/llm/utils/retry_mixin.py +128 -0
- openhands/sdk/llm/utils/telemetry.py +362 -0
- openhands/sdk/llm/utils/unverified_models.py +156 -0
- openhands/sdk/llm/utils/verified_models.py +65 -0
- openhands/sdk/logger/__init__.py +22 -0
- openhands/sdk/logger/logger.py +195 -0
- openhands/sdk/logger/rolling.py +113 -0
- openhands/sdk/mcp/__init__.py +24 -0
- openhands/sdk/mcp/client.py +76 -0
- openhands/sdk/mcp/definition.py +106 -0
- openhands/sdk/mcp/exceptions.py +19 -0
- openhands/sdk/mcp/tool.py +270 -0
- openhands/sdk/mcp/utils.py +83 -0
- openhands/sdk/observability/__init__.py +4 -0
- openhands/sdk/observability/laminar.py +166 -0
- openhands/sdk/observability/utils.py +20 -0
- openhands/sdk/py.typed +0 -0
- openhands/sdk/secret/__init__.py +19 -0
- openhands/sdk/secret/secrets.py +92 -0
- openhands/sdk/security/__init__.py +6 -0
- openhands/sdk/security/analyzer.py +111 -0
- openhands/sdk/security/confirmation_policy.py +61 -0
- openhands/sdk/security/llm_analyzer.py +29 -0
- openhands/sdk/security/risk.py +100 -0
- openhands/sdk/tool/__init__.py +34 -0
- openhands/sdk/tool/builtins/__init__.py +34 -0
- openhands/sdk/tool/builtins/finish.py +106 -0
- openhands/sdk/tool/builtins/think.py +117 -0
- openhands/sdk/tool/registry.py +184 -0
- openhands/sdk/tool/schema.py +286 -0
- openhands/sdk/tool/spec.py +39 -0
- openhands/sdk/tool/tool.py +481 -0
- openhands/sdk/utils/__init__.py +22 -0
- openhands/sdk/utils/async_executor.py +115 -0
- openhands/sdk/utils/async_utils.py +39 -0
- openhands/sdk/utils/cipher.py +68 -0
- openhands/sdk/utils/command.py +90 -0
- openhands/sdk/utils/deprecation.py +166 -0
- openhands/sdk/utils/github.py +44 -0
- openhands/sdk/utils/json.py +48 -0
- openhands/sdk/utils/models.py +570 -0
- openhands/sdk/utils/paging.py +63 -0
- openhands/sdk/utils/pydantic_diff.py +85 -0
- openhands/sdk/utils/pydantic_secrets.py +64 -0
- openhands/sdk/utils/truncate.py +117 -0
- openhands/sdk/utils/visualize.py +58 -0
- openhands/sdk/workspace/__init__.py +17 -0
- openhands/sdk/workspace/base.py +158 -0
- openhands/sdk/workspace/local.py +189 -0
- openhands/sdk/workspace/models.py +35 -0
- openhands/sdk/workspace/remote/__init__.py +8 -0
- openhands/sdk/workspace/remote/async_remote_workspace.py +149 -0
- openhands/sdk/workspace/remote/base.py +164 -0
- openhands/sdk/workspace/remote/remote_workspace_mixin.py +323 -0
- openhands/sdk/workspace/workspace.py +49 -0
- openhands_sdk-1.7.3.dist-info/METADATA +17 -0
- openhands_sdk-1.7.3.dist-info/RECORD +180 -0
- openhands_sdk-1.7.3.dist-info/WHEEL +5 -0
- openhands_sdk-1.7.3.dist-info/top_level.txt +1 -0

openhands/sdk/event/base.py
@@ -0,0 +1,149 @@
import uuid
from abc import ABC, abstractmethod
from datetime import datetime
from typing import TYPE_CHECKING, ClassVar

from pydantic import ConfigDict, Field
from rich.text import Text

from openhands.sdk.event.types import EventID, SourceType
from openhands.sdk.llm import ImageContent, Message, TextContent
from openhands.sdk.utils.models import DiscriminatedUnionMixin


if TYPE_CHECKING:
    from openhands.sdk.event.llm_convertible import ActionEvent

N_CHAR_PREVIEW = 500


class Event(DiscriminatedUnionMixin, ABC):
    """Base class for all events."""

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid", frozen=True)
    id: EventID = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique event id (ULID/UUID)",
    )
    timestamp: str = Field(
        default_factory=lambda: datetime.now().isoformat(),
        description="Event timestamp",
    )  # consistent with V1
    source: SourceType = Field(..., description="The source of this event")

    @property
    def visualize(self) -> Text:
        """Return Rich Text representation of this event.

        This is a fallback implementation for unknown event types.
        Subclasses should override this method to provide specific visualization.
        """
        content = Text()
        content.append(f"Unknown event type: {self.__class__.__name__}")
        content.append(f"\n{self.model_dump()}")
        return content

    def __str__(self) -> str:
        """Plain text string representation for display."""
        return f"{self.__class__.__name__} ({self.source})"

    def __repr__(self) -> str:
        """Developer-friendly representation."""
        return (
            f"{self.__class__.__name__}(id='{self.id[:8]}...', "
            f"source='{self.source}', timestamp='{self.timestamp}')"
        )


class LLMConvertibleEvent(Event, ABC):
    """Base class for events that can be converted to LLM messages."""

    @abstractmethod
    def to_llm_message(self) -> Message:
        raise NotImplementedError()

    def __str__(self) -> str:
        """Plain text string representation showing LLM message content."""
        base_str = super().__str__()
        try:
            llm_message = self.to_llm_message()
            # Extract text content from the message
            text_parts = []
            for content in llm_message.content:
                if isinstance(content, TextContent):
                    text_parts.append(content.text)
                elif isinstance(content, ImageContent):
                    text_parts.append(f"[Image: {len(content.image_urls)} URLs]")

            if text_parts:
                content_preview = " ".join(text_parts)
                # Truncate long content for display
                if len(content_preview) > N_CHAR_PREVIEW:
                    content_preview = content_preview[: N_CHAR_PREVIEW - 3] + "..."
                return f"{base_str}\n {llm_message.role}: {content_preview}"
            else:
                return f"{base_str}\n {llm_message.role}: [no text content]"
        except Exception:
            # Fall back to base representation if LLM message conversion fails
            return base_str

    @staticmethod
    def events_to_messages(events: list["LLMConvertibleEvent"]) -> list[Message]:
        """Convert event stream to LLM message stream, handling multi-action batches"""
        # TODO: We should add extensive tests for this
        from openhands.sdk.event.llm_convertible import ActionEvent

        messages = []
        i = 0

        while i < len(events):
            event = events[i]

            if isinstance(event, ActionEvent):
                # Collect all ActionEvents from the same LLM response.
                # This happens with parallel function calling.
                batch_events: list[ActionEvent] = [event]
                response_id = event.llm_response_id

                # Look ahead for related events
                j = i + 1
                while j < len(events) and isinstance(events[j], ActionEvent):
                    event = events[j]
                    assert isinstance(event, ActionEvent)  # for type checker
                    if event.llm_response_id != response_id:
                        break
                    batch_events.append(event)
                    j += 1

                # Create combined message for the response
                messages.append(_combine_action_events(batch_events))
                i = j
            else:
                # Regular event - direct conversion
                messages.append(event.to_llm_message())
                i += 1

        return messages


def _combine_action_events(events: list["ActionEvent"]) -> Message:
    """Combine multiple ActionEvents into a single LLM message.

    We receive multiple ActionEvents per LLM message when the LLM returns
    multiple tool calls with parallel function calling.
    """
    if len(events) == 1:
        return events[0].to_llm_message()
    # Multi-action case - reconstruct the original LLM response
    for e in events[1:]:
        assert len(e.thought) == 0, (
            "Expected empty thought for multi-action events after the first one"
        )

    return Message(
        role="assistant",
        content=events[0].thought,  # Shared thought content only in the first event
        tool_calls=[event.tool_call for event in events],
        reasoning_content=events[0].reasoning_content,  # Shared reasoning content
        thinking_blocks=events[0].thinking_blocks,  # Shared thinking blocks
    )
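
To make the base-class contract above concrete, here is a minimal usage sketch that is not part of the package: NoteEvent and its note field are hypothetical, and only the classes shown in this hunk (plus SourceType from event/types.py) are assumed.

from openhands.sdk.event.base import LLMConvertibleEvent
from openhands.sdk.event.types import SourceType
from openhands.sdk.llm import Message, TextContent


class NoteEvent(LLMConvertibleEvent):
    """Hypothetical event: a plain-text note forwarded to the LLM as a user message."""

    source: SourceType = "environment"
    note: str = ""

    def to_llm_message(self) -> Message:
        return Message(role="user", content=[TextContent(text=self.note)])


# events_to_messages walks a heterogeneous event stream; ActionEvents sharing an
# llm_response_id would be batched, while plain events map one-to-one.
events = [NoteEvent(note="hello"), NoteEvent(note="world")]
messages = LLMConvertibleEvent.events_to_messages(events)
print([m.role for m in messages])  # -> ['user', 'user']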

openhands/sdk/event/condenser.py
@@ -0,0 +1,82 @@
from pydantic import Field
from rich.text import Text

from openhands.sdk.event.base import Event, LLMConvertibleEvent
from openhands.sdk.event.types import EventID, SourceType
from openhands.sdk.llm import Message, TextContent


class Condensation(Event):
    """This action indicates a condensation of the conversation history is happening."""

    forgotten_event_ids: list[EventID] = Field(
        default_factory=list,
        description="The IDs of the events that are being forgotten "
        "(removed from the `View` given to the LLM).",
    )

    summary: str | None = Field(
        default=None, description="An optional summary of the events being forgotten."
    )

    summary_offset: int | None = Field(
        default=None,
        ge=0,
        description="An optional offset to the start of the resulting view"
        " indicating where the summary should be inserted.",
    )
    llm_response_id: EventID = Field(
        description=(
            "Completion or Response ID of the LLM response that generated this event"
        ),
    )

    source: SourceType = "environment"

    @property
    def visualize(self) -> Text:
        text = Text()

        text.append("Auto Conversation Condensation Triggered.\n", style="bold")

        text.append(f"Forgetting {len(self.forgotten_event_ids)} events\n")
        if self.summary:
            text.append("\n[Summary of Events Being Forgotten]\n", style="bold")
            text.append(f"{self.summary}\n")
        return text


class CondensationRequest(Event):
    """This action is used to request a condensation of the conversation history.

    Attributes:
        action (str): The action type, namely ActionType.CONDENSATION_REQUEST.
    """

    source: SourceType = "environment"

    @property
    def visualize(self) -> Text:
        text = Text()
        text.append("Conversation Condensation Requested\n", style="bold")
        message = (
            "A condensation of the conversation history has been requested to "
            "manage context window usage.\n"
        )
        text.append(message)
        return text


class CondensationSummaryEvent(LLMConvertibleEvent):
    """This event represents a summary generated by a condenser."""

    summary: str
    """The summary text."""

    source: SourceType = "environment"

    def to_llm_message(self) -> Message:
        return Message(
            role="user",
            content=[TextContent(text=self.summary)],
        )
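
A hedged construction sketch for the condensation events above; the event IDs, response ID, and summary text are made-up placeholders, and only the classes defined in this hunk are assumed.

from openhands.sdk.event.condenser import Condensation, CondensationSummaryEvent

condensation = Condensation(
    forgotten_event_ids=["evt-1", "evt-2", "evt-3"],  # hypothetical event IDs
    summary="User asked for a refactor; the agent edited foo.py and reran the tests.",
    summary_offset=0,
    llm_response_id="resp-123",  # hypothetical LLM completion/response ID
)
print(condensation.visualize.plain)  # "Auto Conversation Condensation Triggered." ...

# The condenser's summary re-enters the prompt as a user message.
summary_event = CondensationSummaryEvent(summary=condensation.summary or "")
print(summary_event.to_llm_message().role)  # -> user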

openhands/sdk/event/conversation_error.py
@@ -0,0 +1,25 @@
from pydantic import Field

from openhands.sdk.event.base import Event


class ConversationErrorEvent(Event):
    """
    Conversation-level failure that is NOT sent back to the LLM.

    This event is emitted by the conversation runtime when an unexpected
    exception bubbles up and prevents the run loop from continuing. It is
    intended for client applications (e.g., UIs) to present a top-level error
    state, and for orchestration to react. It is not an observation and it is
    not LLM-convertible.

    Differences from AgentErrorEvent:
    - Not tied to any tool_name/tool_call_id (AgentErrorEvent is a tool
      observation).
    - Typically source='environment' and the run loop moves to an ERROR state,
      while AgentErrorEvent has source='agent' and the conversation can
      continue.
    """

    code: str = Field(description="Code for the error - typically a type")
    detail: str = Field(description="Details about the error")
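
For illustration only (the values are invented; source must be supplied explicitly because this class keeps the base field required):

from openhands.sdk.event.conversation_error import ConversationErrorEvent

err = ConversationErrorEvent(
    source="environment",
    code="RuntimeError",  # illustrative error code
    detail="Run loop stopped: workspace became unreachable.",
)
print(err)  # -> ConversationErrorEvent (environment)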

openhands/sdk/event/conversation_state.py
@@ -0,0 +1,104 @@
"""Events related to conversation state updates."""

import uuid
from typing import TYPE_CHECKING, Any

from pydantic import Field, field_validator

from openhands.sdk.event.base import Event
from openhands.sdk.event.types import SourceType


if TYPE_CHECKING:
    from openhands.sdk.conversation.state import ConversationState

FULL_STATE_KEY = "full_state"


class ConversationStateUpdateEvent(Event):
    """Event that contains conversation state updates.

    This event is sent via websocket whenever the conversation state changes,
    allowing remote clients to stay in sync without making REST API calls.

    All fields are serialized versions of the corresponding ConversationState fields
    to ensure compatibility with websocket transmission.
    """

    source: SourceType = "environment"
    key: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique key for this state update event",
    )
    value: Any = Field(
        default_factory=dict,
        description="Serialized conversation state updates",
    )

    @field_validator("key")
    def validate_key(cls, key):
        if not isinstance(key, str):
            raise ValueError("Key must be a string")
        # Allow special key "full_state" for full state snapshots
        if key == FULL_STATE_KEY:
            return key
        # Allow any string key for flexibility (testing, future extensibility)
        # In practice, keys should match ConversationState fields,
        # but we don't enforce it
        return key

    @field_validator("value")
    def validate_value(cls, value, info):
        # Prevent circular import
        from openhands.sdk.conversation.conversation_stats import ConversationStats

        # For ConversationStats, use snapshot serialization to avoid
        # sending lengthy lists over WebSocket
        if isinstance(value, ConversationStats):
            return value.model_dump(mode="json", context={"use_snapshot": True})

        key = info.data.get("key")
        if key is None:
            # Allow value without key for flexibility
            return value

        # Skip validation for special "full_state" key
        if key == FULL_STATE_KEY:
            return value

        # Prevent circular import
        from openhands.sdk.conversation.state import ConversationState

        field_info = ConversationState.model_fields.get(key)
        if field_info is None:
            # Allow arbitrary keys for testing/future extensibility
            return value

        # Skip type validation - just accept any value
        # The actual type conversion will happen when the state is updated
        return value

    @classmethod
    def from_conversation_state(
        cls, state: "ConversationState"
    ) -> "ConversationStateUpdateEvent":
        """Create a state update event from a ConversationState object.

        This creates an event containing a snapshot of important state fields.

        Args:
            state: The ConversationState to serialize

        Returns:
            A ConversationStateUpdateEvent with serialized state data
        """
        # Create a snapshot with all important state fields
        # Use mode='json' to ensure proper serialization including SecretStr
        state_snapshot = state.model_dump(mode="json", exclude_none=True)

        # Use a special key "full_state" to indicate this is a full snapshot
        return cls(key=FULL_STATE_KEY, value=state_snapshot)

    def __str__(self) -> str:
        return f"ConversationStateUpdate(key={self.key}, value={self.value})"
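
The two update shapes can be sketched as follows; "agent_status" is an illustrative key rather than a guaranteed ConversationState field, and the snapshot payload is made up.

from openhands.sdk.event.conversation_state import (
    FULL_STATE_KEY,
    ConversationStateUpdateEvent,
)

# Keyed update: a single state field changed.
update = ConversationStateUpdateEvent(key="agent_status", value="running")
print(update)  # -> ConversationStateUpdate(key=agent_status, value=running)

# Full snapshot: the whole serialized state under the special key.
snapshot = ConversationStateUpdateEvent(
    key=FULL_STATE_KEY,
    value={"agent_status": "running"},  # illustrative payload
)
print(snapshot.key == FULL_STATE_KEY)  # -> True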

openhands/sdk/event/llm_completion_log.py
@@ -0,0 +1,39 @@
"""Event for streaming LLM completion logs from remote agents to clients."""

from pydantic import Field

from openhands.sdk.event.base import Event
from openhands.sdk.event.types import SourceType


class LLMCompletionLogEvent(Event):
    """Event containing LLM completion log data.

    When an LLM is configured with log_completions=True in a remote conversation,
    this event streams the completion log data back to the client through WebSocket
    instead of writing it to a file inside the Docker container.
    """

    source: SourceType = "environment"
    filename: str = Field(
        ...,
        description="The intended filename for this log (relative to log directory)",
    )
    log_data: str = Field(
        ...,
        description="The JSON-encoded log data to be written to the file",
    )
    model_name: str = Field(
        default="unknown",
        description="The model name for context",
    )
    usage_id: str = Field(
        default="default",
        description="The LLM usage_id that produced this log",
    )

    def __str__(self) -> str:
        return (
            f"LLMCompletionLog(usage_id={self.usage_id}, model={self.model_name}, "
            f"file={self.filename})"
        )
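
A client-side handler might persist these streamed logs as in the sketch below; the handler function, log directory, and filename are assumptions for illustration, not SDK API.

import json
from pathlib import Path

from openhands.sdk.event.llm_completion_log import LLMCompletionLogEvent


def save_completion_log(event: LLMCompletionLogEvent, log_dir: Path) -> Path:
    """Write the JSON-encoded log_data under the filename the event requests."""
    path = log_dir / event.filename
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(event.log_data)
    return path


event = LLMCompletionLogEvent(
    filename="gpt-4o/2025-01-01T00-00-00.json",  # illustrative path
    log_data=json.dumps({"messages": [], "response": {}}),  # illustrative payload
    model_name="gpt-4o",
)
print(save_completion_log(event, Path("./llm_logs")))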
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from openhands.sdk.event.llm_convertible.action import ActionEvent
|
|
2
|
+
from openhands.sdk.event.llm_convertible.message import MessageEvent
|
|
3
|
+
from openhands.sdk.event.llm_convertible.observation import (
|
|
4
|
+
AgentErrorEvent,
|
|
5
|
+
ObservationBaseEvent,
|
|
6
|
+
ObservationEvent,
|
|
7
|
+
UserRejectObservation,
|
|
8
|
+
)
|
|
9
|
+
from openhands.sdk.event.llm_convertible.system import SystemPromptEvent
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
__all__ = [
|
|
13
|
+
"SystemPromptEvent",
|
|
14
|
+
"ActionEvent",
|
|
15
|
+
"ObservationEvent",
|
|
16
|
+
"ObservationBaseEvent",
|
|
17
|
+
"MessageEvent",
|
|
18
|
+
"AgentErrorEvent",
|
|
19
|
+
"UserRejectObservation",
|
|
20
|
+
]
|
|
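
These re-exports are what client code typically dispatches on; a hedged routing sketch (attribute access limited to fields shown elsewhere in this diff, labels arbitrary):

from openhands.sdk.event.llm_convertible import (
    ActionEvent,
    MessageEvent,
    ObservationBaseEvent,
)


def describe(event) -> str:
    """Illustrative router over the exported event types."""
    if isinstance(event, ActionEvent):
        return f"tool call: {event.tool_name}"
    if isinstance(event, ObservationBaseEvent):
        return "tool observation"
    if isinstance(event, MessageEvent):
        return "chat message"
    return event.__class__.__name__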

openhands/sdk/event/llm_convertible/action.py
@@ -0,0 +1,139 @@
from collections.abc import Sequence

from pydantic import Field
from rich.text import Text

from openhands.sdk.event.base import N_CHAR_PREVIEW, EventID, LLMConvertibleEvent
from openhands.sdk.event.types import SourceType, ToolCallID
from openhands.sdk.llm import (
    Message,
    MessageToolCall,
    ReasoningItemModel,
    RedactedThinkingBlock,
    TextContent,
    ThinkingBlock,
)
from openhands.sdk.security import risk
from openhands.sdk.tool.schema import Action


class ActionEvent(LLMConvertibleEvent):
    source: SourceType = "agent"
    thought: Sequence[TextContent] = Field(
        ..., description="The thought process of the agent before taking this action"
    )
    reasoning_content: str | None = Field(
        default=None,
        description="Intermediate reasoning/thinking content from reasoning models",
    )
    thinking_blocks: list[ThinkingBlock | RedactedThinkingBlock] = Field(
        default_factory=list,
        description="Anthropic thinking blocks from the LLM response",
    )
    responses_reasoning_item: ReasoningItemModel | None = Field(
        default=None, description="OpenAI Responses reasoning item from model output"
    )
    action: Action | None = Field(
        default=None,
        description="Single tool call returned by LLM (None when non-executable)",
    )
    tool_name: str = Field(..., description="The name of the tool being called")
    tool_call_id: ToolCallID = Field(
        ..., description="The unique id returned by LLM API for this tool call"
    )
    tool_call: MessageToolCall = Field(
        ...,
        description=(
            "The tool call received from the LLM response. We keep a copy of it "
            "so it is easier to construct it into an LLM message. "
            "This could be different from `action`: e.g., `tool_call` may contain "
            "a `security_risk` field predicted by the LLM when the LLM risk analyzer "
            "is enabled, while `action` does not."
        ),
    )
    llm_response_id: EventID = Field(
        description=(
            "Completion or Response ID of the LLM response that generated this event. "
            "E.g., can be used to group related actions from the same LLM response. "
            "This helps in tracking and managing results of parallel function calling "
            "from the same LLM response."
        ),
    )

    security_risk: risk.SecurityRisk = Field(
        default=risk.SecurityRisk.UNKNOWN,
        description="The LLM's assessment of the safety risk of this action.",
    )

    @property
    def visualize(self) -> Text:
        """Return Rich Text representation of this action event."""
        content = Text()

        if self.security_risk != risk.SecurityRisk.UNKNOWN:
            content.append(self.security_risk.visualize)

        # Display reasoning content first if available
        if self.reasoning_content:
            content.append("Reasoning:\n", style="bold")
            content.append(self.reasoning_content)
            content.append("\n\n")

        # Display complete thought content
        thought_text = " ".join([t.text for t in self.thought])
        if thought_text:
            content.append("Thought:\n", style="bold")
            content.append(thought_text)
            content.append("\n\n")

        # Responses API reasoning (plaintext only; never render encrypted_content)
        reasoning_item = self.responses_reasoning_item
        if reasoning_item is not None:
            content.append("Reasoning:\n", style="bold")
            if reasoning_item.summary:
                for s in reasoning_item.summary:
                    content.append(f"- {s}\n")
            if reasoning_item.content:
                for b in reasoning_item.content:
                    content.append(f"{b}\n")

        # Display action information using action's visualize method
        if self.action:
            content.append(self.action.visualize)
        else:
            # When action is None (non-executable), show the function call
            content.append("Function call:\n", style="bold")
            content.append(f"- {self.tool_call.name} ({self.tool_call.id})\n")

        return content

    def to_llm_message(self) -> Message:
        """Individual message - may be incomplete for multi-action batches"""
        return Message(
            role="assistant",
            content=self.thought,
            tool_calls=[self.tool_call],
            reasoning_content=self.reasoning_content,
            thinking_blocks=self.thinking_blocks,
            responses_reasoning_item=self.responses_reasoning_item,
        )

    def __str__(self) -> str:
        """Plain text string representation for ActionEvent."""
        base_str = f"{self.__class__.__name__} ({self.source})"
        thought_text = " ".join([t.text for t in self.thought])
        thought_preview = (
            thought_text[:N_CHAR_PREVIEW] + "..."
            if len(thought_text) > N_CHAR_PREVIEW
            else thought_text
        )
        if self.action:
            action_name = self.action.__class__.__name__
            return f"{base_str}\n Thought: {thought_preview}\n Action: {action_name}"
        else:
            # When action is None (non-executable), show the tool call
            call = f"{self.tool_call.name}:{self.tool_call.id}"
            return (
                f"{base_str}\n Thought: {thought_preview}\n Action: (not executed)"
                f"\n Call: {call}"
            )
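
A consumption-side sketch to close out this hunk: the callback wiring is assumed rather than shown in this diff, and the code reads only the ActionEvent fields defined above.

from openhands.sdk.event.llm_convertible import ActionEvent
from openhands.sdk.security import risk


def on_event(event) -> None:
    """Hypothetical conversation callback that logs agent tool calls."""
    if not isinstance(event, ActionEvent):
        return
    thought = " ".join(t.text for t in event.thought)
    flagged = event.security_risk != risk.SecurityRisk.UNKNOWN
    print(
        f"{event.tool_name} ({event.tool_call_id}) "
        f"risk={event.security_risk} flagged={flagged} "
        f"thought={thought[:80]!r}"
    )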