openhands-sdk 1.7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openhands/sdk/__init__.py +111 -0
- openhands/sdk/agent/__init__.py +8 -0
- openhands/sdk/agent/agent.py +650 -0
- openhands/sdk/agent/base.py +457 -0
- openhands/sdk/agent/prompts/in_context_learning_example.j2 +169 -0
- openhands/sdk/agent/prompts/in_context_learning_example_suffix.j2 +3 -0
- openhands/sdk/agent/prompts/model_specific/anthropic_claude.j2 +3 -0
- openhands/sdk/agent/prompts/model_specific/google_gemini.j2 +1 -0
- openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5-codex.j2 +2 -0
- openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5.j2 +3 -0
- openhands/sdk/agent/prompts/security_policy.j2 +22 -0
- openhands/sdk/agent/prompts/security_risk_assessment.j2 +21 -0
- openhands/sdk/agent/prompts/self_documentation.j2 +15 -0
- openhands/sdk/agent/prompts/system_prompt.j2 +132 -0
- openhands/sdk/agent/prompts/system_prompt_interactive.j2 +14 -0
- openhands/sdk/agent/prompts/system_prompt_long_horizon.j2 +40 -0
- openhands/sdk/agent/prompts/system_prompt_planning.j2 +40 -0
- openhands/sdk/agent/prompts/system_prompt_tech_philosophy.j2 +122 -0
- openhands/sdk/agent/utils.py +228 -0
- openhands/sdk/context/__init__.py +28 -0
- openhands/sdk/context/agent_context.py +264 -0
- openhands/sdk/context/condenser/__init__.py +18 -0
- openhands/sdk/context/condenser/base.py +100 -0
- openhands/sdk/context/condenser/llm_summarizing_condenser.py +248 -0
- openhands/sdk/context/condenser/no_op_condenser.py +14 -0
- openhands/sdk/context/condenser/pipeline_condenser.py +56 -0
- openhands/sdk/context/condenser/prompts/summarizing_prompt.j2 +59 -0
- openhands/sdk/context/condenser/utils.py +149 -0
- openhands/sdk/context/prompts/__init__.py +6 -0
- openhands/sdk/context/prompts/prompt.py +114 -0
- openhands/sdk/context/prompts/templates/ask_agent_template.j2 +11 -0
- openhands/sdk/context/prompts/templates/skill_knowledge_info.j2 +8 -0
- openhands/sdk/context/prompts/templates/system_message_suffix.j2 +32 -0
- openhands/sdk/context/skills/__init__.py +28 -0
- openhands/sdk/context/skills/exceptions.py +11 -0
- openhands/sdk/context/skills/skill.py +720 -0
- openhands/sdk/context/skills/trigger.py +36 -0
- openhands/sdk/context/skills/types.py +48 -0
- openhands/sdk/context/view.py +503 -0
- openhands/sdk/conversation/__init__.py +40 -0
- openhands/sdk/conversation/base.py +281 -0
- openhands/sdk/conversation/conversation.py +152 -0
- openhands/sdk/conversation/conversation_stats.py +85 -0
- openhands/sdk/conversation/event_store.py +157 -0
- openhands/sdk/conversation/events_list_base.py +17 -0
- openhands/sdk/conversation/exceptions.py +50 -0
- openhands/sdk/conversation/fifo_lock.py +133 -0
- openhands/sdk/conversation/impl/__init__.py +5 -0
- openhands/sdk/conversation/impl/local_conversation.py +665 -0
- openhands/sdk/conversation/impl/remote_conversation.py +956 -0
- openhands/sdk/conversation/persistence_const.py +9 -0
- openhands/sdk/conversation/response_utils.py +41 -0
- openhands/sdk/conversation/secret_registry.py +126 -0
- openhands/sdk/conversation/serialization_diff.py +0 -0
- openhands/sdk/conversation/state.py +392 -0
- openhands/sdk/conversation/stuck_detector.py +311 -0
- openhands/sdk/conversation/title_utils.py +191 -0
- openhands/sdk/conversation/types.py +45 -0
- openhands/sdk/conversation/visualizer/__init__.py +12 -0
- openhands/sdk/conversation/visualizer/base.py +67 -0
- openhands/sdk/conversation/visualizer/default.py +373 -0
- openhands/sdk/critic/__init__.py +15 -0
- openhands/sdk/critic/base.py +38 -0
- openhands/sdk/critic/impl/__init__.py +12 -0
- openhands/sdk/critic/impl/agent_finished.py +83 -0
- openhands/sdk/critic/impl/empty_patch.py +49 -0
- openhands/sdk/critic/impl/pass_critic.py +42 -0
- openhands/sdk/event/__init__.py +42 -0
- openhands/sdk/event/base.py +149 -0
- openhands/sdk/event/condenser.py +82 -0
- openhands/sdk/event/conversation_error.py +25 -0
- openhands/sdk/event/conversation_state.py +104 -0
- openhands/sdk/event/llm_completion_log.py +39 -0
- openhands/sdk/event/llm_convertible/__init__.py +20 -0
- openhands/sdk/event/llm_convertible/action.py +139 -0
- openhands/sdk/event/llm_convertible/message.py +142 -0
- openhands/sdk/event/llm_convertible/observation.py +141 -0
- openhands/sdk/event/llm_convertible/system.py +61 -0
- openhands/sdk/event/token.py +16 -0
- openhands/sdk/event/types.py +11 -0
- openhands/sdk/event/user_action.py +21 -0
- openhands/sdk/git/exceptions.py +43 -0
- openhands/sdk/git/git_changes.py +249 -0
- openhands/sdk/git/git_diff.py +129 -0
- openhands/sdk/git/models.py +21 -0
- openhands/sdk/git/utils.py +189 -0
- openhands/sdk/hooks/__init__.py +30 -0
- openhands/sdk/hooks/config.py +180 -0
- openhands/sdk/hooks/conversation_hooks.py +227 -0
- openhands/sdk/hooks/executor.py +155 -0
- openhands/sdk/hooks/manager.py +170 -0
- openhands/sdk/hooks/types.py +40 -0
- openhands/sdk/io/__init__.py +6 -0
- openhands/sdk/io/base.py +48 -0
- openhands/sdk/io/cache.py +85 -0
- openhands/sdk/io/local.py +119 -0
- openhands/sdk/io/memory.py +54 -0
- openhands/sdk/llm/__init__.py +45 -0
- openhands/sdk/llm/exceptions/__init__.py +45 -0
- openhands/sdk/llm/exceptions/classifier.py +50 -0
- openhands/sdk/llm/exceptions/mapping.py +54 -0
- openhands/sdk/llm/exceptions/types.py +101 -0
- openhands/sdk/llm/llm.py +1140 -0
- openhands/sdk/llm/llm_registry.py +122 -0
- openhands/sdk/llm/llm_response.py +59 -0
- openhands/sdk/llm/message.py +656 -0
- openhands/sdk/llm/mixins/fn_call_converter.py +1288 -0
- openhands/sdk/llm/mixins/non_native_fc.py +97 -0
- openhands/sdk/llm/options/__init__.py +1 -0
- openhands/sdk/llm/options/chat_options.py +93 -0
- openhands/sdk/llm/options/common.py +19 -0
- openhands/sdk/llm/options/responses_options.py +67 -0
- openhands/sdk/llm/router/__init__.py +10 -0
- openhands/sdk/llm/router/base.py +117 -0
- openhands/sdk/llm/router/impl/multimodal.py +76 -0
- openhands/sdk/llm/router/impl/random.py +22 -0
- openhands/sdk/llm/streaming.py +9 -0
- openhands/sdk/llm/utils/metrics.py +312 -0
- openhands/sdk/llm/utils/model_features.py +192 -0
- openhands/sdk/llm/utils/model_info.py +90 -0
- openhands/sdk/llm/utils/model_prompt_spec.py +98 -0
- openhands/sdk/llm/utils/retry_mixin.py +128 -0
- openhands/sdk/llm/utils/telemetry.py +362 -0
- openhands/sdk/llm/utils/unverified_models.py +156 -0
- openhands/sdk/llm/utils/verified_models.py +65 -0
- openhands/sdk/logger/__init__.py +22 -0
- openhands/sdk/logger/logger.py +195 -0
- openhands/sdk/logger/rolling.py +113 -0
- openhands/sdk/mcp/__init__.py +24 -0
- openhands/sdk/mcp/client.py +76 -0
- openhands/sdk/mcp/definition.py +106 -0
- openhands/sdk/mcp/exceptions.py +19 -0
- openhands/sdk/mcp/tool.py +270 -0
- openhands/sdk/mcp/utils.py +83 -0
- openhands/sdk/observability/__init__.py +4 -0
- openhands/sdk/observability/laminar.py +166 -0
- openhands/sdk/observability/utils.py +20 -0
- openhands/sdk/py.typed +0 -0
- openhands/sdk/secret/__init__.py +19 -0
- openhands/sdk/secret/secrets.py +92 -0
- openhands/sdk/security/__init__.py +6 -0
- openhands/sdk/security/analyzer.py +111 -0
- openhands/sdk/security/confirmation_policy.py +61 -0
- openhands/sdk/security/llm_analyzer.py +29 -0
- openhands/sdk/security/risk.py +100 -0
- openhands/sdk/tool/__init__.py +34 -0
- openhands/sdk/tool/builtins/__init__.py +34 -0
- openhands/sdk/tool/builtins/finish.py +106 -0
- openhands/sdk/tool/builtins/think.py +117 -0
- openhands/sdk/tool/registry.py +184 -0
- openhands/sdk/tool/schema.py +286 -0
- openhands/sdk/tool/spec.py +39 -0
- openhands/sdk/tool/tool.py +481 -0
- openhands/sdk/utils/__init__.py +22 -0
- openhands/sdk/utils/async_executor.py +115 -0
- openhands/sdk/utils/async_utils.py +39 -0
- openhands/sdk/utils/cipher.py +68 -0
- openhands/sdk/utils/command.py +90 -0
- openhands/sdk/utils/deprecation.py +166 -0
- openhands/sdk/utils/github.py +44 -0
- openhands/sdk/utils/json.py +48 -0
- openhands/sdk/utils/models.py +570 -0
- openhands/sdk/utils/paging.py +63 -0
- openhands/sdk/utils/pydantic_diff.py +85 -0
- openhands/sdk/utils/pydantic_secrets.py +64 -0
- openhands/sdk/utils/truncate.py +117 -0
- openhands/sdk/utils/visualize.py +58 -0
- openhands/sdk/workspace/__init__.py +17 -0
- openhands/sdk/workspace/base.py +158 -0
- openhands/sdk/workspace/local.py +189 -0
- openhands/sdk/workspace/models.py +35 -0
- openhands/sdk/workspace/remote/__init__.py +8 -0
- openhands/sdk/workspace/remote/async_remote_workspace.py +149 -0
- openhands/sdk/workspace/remote/base.py +164 -0
- openhands/sdk/workspace/remote/remote_workspace_mixin.py +323 -0
- openhands/sdk/workspace/workspace.py +49 -0
- openhands_sdk-1.7.3.dist-info/METADATA +17 -0
- openhands_sdk-1.7.3.dist-info/RECORD +180 -0
- openhands_sdk-1.7.3.dist-info/WHEEL +5 -0
- openhands_sdk-1.7.3.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
"""Trigger types for skills.
|
|
2
|
+
|
|
3
|
+
This module defines different trigger types that determine when a skill
|
|
4
|
+
should be activated.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from abc import ABC
|
|
8
|
+
from typing import Literal
|
|
9
|
+
|
|
10
|
+
from pydantic import BaseModel
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class BaseTrigger(BaseModel, ABC):
    """Abstract base class for all skill trigger types.

    Concrete triggers (e.g. keyword- or task-based) subclass this and add a
    discriminating ``type`` field plus their trigger-specific data.
    """
    # NOTE: the redundant `pass` was removed — the docstring alone is a
    # sufficient class body.
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class KeywordTrigger(BaseTrigger):
    """Trigger for keyword-based skills.

    These skills are activated when specific keywords appear in the user's query.
    """

    # Discriminator value distinguishing this trigger variant during
    # (de)serialization.
    type: Literal["keyword"] = "keyword"
    # Keywords whose presence in the user's query activates the skill.
    keywords: list[str]
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class TaskTrigger(BaseTrigger):
    """Trigger for task-specific skills.

    These skills are activated for specific task types and can modify prompts.
    """

    # Discriminator value distinguishing this trigger variant during
    # (de)serialization.
    type: Literal["task"] = "task"
    # Task identifiers for which this skill should activate.
    triggers: list[str]
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
from datetime import UTC, datetime
|
|
2
|
+
|
|
3
|
+
from pydantic import BaseModel, Field
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class InputMetadata(BaseModel):
    """Metadata for task skill inputs."""

    # Field descriptions double as API documentation for consumers of the model.
    name: str = Field(description="Name of the input parameter")
    description: str = Field(description="Description of the input parameter")
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class SkillKnowledge(BaseModel):
    """Represents knowledge from a triggered skill."""

    name: str = Field(description="The name of the skill that was triggered")
    trigger: str = Field(description="The word that triggered this skill")
    content: str = Field(description="The actual content/knowledge from the skill")
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class SkillResponse(BaseModel):
    """Response model for skills endpoint.

    Note: This model only includes basic metadata that can be determined
    without parsing skill content. Use the separate content API
    to get detailed skill information.
    """

    name: str = Field(description="The name of the skill")
    path: str = Field(description="The path or identifier of the skill")
    # default_factory is used (not a plain default) so the timestamp is taken
    # at instantiation time; datetime.now(UTC) keeps it timezone-aware.
    created_at: datetime = Field(
        default_factory=lambda: datetime.now(UTC),
        description="Timestamp when the skill was created",
    )
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class SkillContentResponse(BaseModel):
    """Response model for individual skill content endpoint."""

    content: str = Field(description="The full content of the skill")
    path: str = Field(description="The path or identifier of the skill")
    triggers: list[str] = Field(
        description="List of triggers associated with the skill"
    )
    # Use the explicit `default=` keyword rather than a positional default,
    # matching pydantic's recommended Field usage.
    git_provider: str | None = Field(
        default=None,
        description="Git provider if the skill is sourced from a Git repository",
    )
|
|
@@ -0,0 +1,503 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections import defaultdict
|
|
4
|
+
from collections.abc import Sequence
|
|
5
|
+
from functools import cached_property
|
|
6
|
+
from logging import getLogger
|
|
7
|
+
from typing import overload
|
|
8
|
+
|
|
9
|
+
from pydantic import BaseModel, computed_field
|
|
10
|
+
|
|
11
|
+
from openhands.sdk.event import (
|
|
12
|
+
Condensation,
|
|
13
|
+
CondensationRequest,
|
|
14
|
+
CondensationSummaryEvent,
|
|
15
|
+
LLMConvertibleEvent,
|
|
16
|
+
)
|
|
17
|
+
from openhands.sdk.event.base import Event, EventID
|
|
18
|
+
from openhands.sdk.event.llm_convertible import (
|
|
19
|
+
ActionEvent,
|
|
20
|
+
ObservationBaseEvent,
|
|
21
|
+
)
|
|
22
|
+
from openhands.sdk.event.types import ToolCallID
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
logger = getLogger(__name__)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class ActionBatch(BaseModel):
    """Represents a batch of ActionEvents grouped by llm_response_id.

    This is a utility class used to help detect and manage batches of ActionEvents
    that share the same llm_response_id, which indicates they were generated together
    by the LLM. This is important for ensuring atomicity when manipulating events
    in a View, such as during condensation.
    """

    batches: dict[EventID, list[EventID]]
    """dict mapping llm_response_id to list of ActionEvent IDs"""

    action_id_to_response_id: dict[EventID, EventID]
    """dict mapping ActionEvent ID to llm_response_id"""

    action_id_to_tool_call_id: dict[EventID, ToolCallID]
    """dict mapping ActionEvent ID to tool_call_id"""

    @staticmethod
    def from_events(
        events: Sequence[Event],
    ) -> ActionBatch:
        """Build a map of llm_response_id -> list of ActionEvent IDs."""
        grouped: dict[EventID, list[EventID]] = defaultdict(list)
        response_ids: dict[EventID, EventID] = {}
        tool_call_ids: dict[EventID, ToolCallID] = {}

        # Only ActionEvents participate in batching; everything else is ignored.
        for action in (e for e in events if isinstance(e, ActionEvent)):
            grouped[action.llm_response_id].append(action.id)
            response_ids[action.id] = action.llm_response_id
            if action.tool_call_id is not None:
                tool_call_ids[action.id] = action.tool_call_id

        return ActionBatch(
            batches=grouped,
            action_id_to_response_id=response_ids,
            action_id_to_tool_call_id=tool_call_ids,
        )
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
class View(BaseModel):
|
|
71
|
+
"""Linearly ordered view of events.
|
|
72
|
+
|
|
73
|
+
Produced by a condenser to indicate the included events are ready to process as LLM
|
|
74
|
+
input. Also contains fields with information from the condensation process to aid
|
|
75
|
+
in deciding whether further condensation is needed.
|
|
76
|
+
"""
|
|
77
|
+
|
|
78
|
+
events: list[LLMConvertibleEvent]
|
|
79
|
+
|
|
80
|
+
unhandled_condensation_request: bool = False
|
|
81
|
+
"""Whether there is an unhandled condensation request in the view."""
|
|
82
|
+
|
|
83
|
+
condensations: list[Condensation] = []
|
|
84
|
+
"""A list of condensations that were processed to produce the view."""
|
|
85
|
+
|
|
86
|
+
    def __len__(self) -> int:
        # The view's length is simply the number of LLM-convertible events.
        return len(self.events)
|
|
88
|
+
|
|
89
|
+
@property
|
|
90
|
+
def most_recent_condensation(self) -> Condensation | None:
|
|
91
|
+
"""Return the most recent condensation, or None if no condensations exist."""
|
|
92
|
+
return self.condensations[-1] if self.condensations else None
|
|
93
|
+
|
|
94
|
+
@property
|
|
95
|
+
def summary_event_index(self) -> int | None:
|
|
96
|
+
"""Return the index of the summary event, or None if no summary exists."""
|
|
97
|
+
recent_condensation = self.most_recent_condensation
|
|
98
|
+
if (
|
|
99
|
+
recent_condensation is not None
|
|
100
|
+
and recent_condensation.summary is not None
|
|
101
|
+
and recent_condensation.summary_offset is not None
|
|
102
|
+
):
|
|
103
|
+
return recent_condensation.summary_offset
|
|
104
|
+
return None
|
|
105
|
+
|
|
106
|
+
@property
|
|
107
|
+
def summary_event(self) -> CondensationSummaryEvent | None:
|
|
108
|
+
"""Return the summary event, or None if no summary exists."""
|
|
109
|
+
if self.summary_event_index is not None:
|
|
110
|
+
event = self.events[self.summary_event_index]
|
|
111
|
+
if isinstance(event, CondensationSummaryEvent):
|
|
112
|
+
return event
|
|
113
|
+
return None
|
|
114
|
+
|
|
115
|
+
@computed_field # type: ignore[prop-decorator]
|
|
116
|
+
@cached_property
|
|
117
|
+
def manipulation_indices(self) -> list[int]:
|
|
118
|
+
"""Return cached manipulation indices for this view's events.
|
|
119
|
+
|
|
120
|
+
These indices represent boundaries between atomic units where events can be
|
|
121
|
+
safely manipulated (inserted or forgotten). An atomic unit is either:
|
|
122
|
+
- A tool loop: a sequence of batches starting with thinking blocks and
|
|
123
|
+
continuing through all subsequent batches until a non-batch event
|
|
124
|
+
- A batch of ActionEvents with the same llm_response_id and their
|
|
125
|
+
corresponding ObservationBaseEvents (when not part of a tool loop)
|
|
126
|
+
- A single event that is neither an ActionEvent nor an ObservationBaseEvent
|
|
127
|
+
|
|
128
|
+
Tool loops are identified by thinking blocks and must remain atomic to
|
|
129
|
+
preserve Claude API requirements that the final assistant message must
|
|
130
|
+
have thinking blocks when thinking is enabled.
|
|
131
|
+
|
|
132
|
+
The returned indices can be used for:
|
|
133
|
+
- Inserting new events: any returned index is safe
|
|
134
|
+
- Forgetting events: select a range between two consecutive indices
|
|
135
|
+
|
|
136
|
+
Consecutive indices define atomic units that must stay together:
|
|
137
|
+
- events[indices[i]:indices[i+1]] is an atomic unit
|
|
138
|
+
|
|
139
|
+
Returns:
|
|
140
|
+
Sorted list of indices representing atomic unit boundaries. Always
|
|
141
|
+
includes 0 and len(events) as boundaries.
|
|
142
|
+
"""
|
|
143
|
+
if not self.events:
|
|
144
|
+
return [0]
|
|
145
|
+
|
|
146
|
+
# Build mapping of llm_response_id -> list of event indices
|
|
147
|
+
batches: dict[EventID, list[int]] = {}
|
|
148
|
+
for idx, event in enumerate(self.events):
|
|
149
|
+
if isinstance(event, ActionEvent):
|
|
150
|
+
llm_response_id = event.llm_response_id
|
|
151
|
+
if llm_response_id not in batches:
|
|
152
|
+
batches[llm_response_id] = []
|
|
153
|
+
batches[llm_response_id].append(idx)
|
|
154
|
+
|
|
155
|
+
# Build mapping of tool_call_id -> observation indices
|
|
156
|
+
observation_indices: dict[ToolCallID, int] = {}
|
|
157
|
+
for idx, event in enumerate(self.events):
|
|
158
|
+
if (
|
|
159
|
+
isinstance(event, ObservationBaseEvent)
|
|
160
|
+
and event.tool_call_id is not None
|
|
161
|
+
):
|
|
162
|
+
observation_indices[event.tool_call_id] = idx
|
|
163
|
+
|
|
164
|
+
# For each batch, find the range of indices that includes all actions
|
|
165
|
+
# and their corresponding observations, and track if batch has thinking blocks
|
|
166
|
+
batch_ranges: list[tuple[int, int, bool]] = []
|
|
167
|
+
for llm_response_id, action_indices in batches.items():
|
|
168
|
+
min_idx = min(action_indices)
|
|
169
|
+
max_idx = max(action_indices)
|
|
170
|
+
|
|
171
|
+
# Check if this batch has thinking blocks (only first action has them)
|
|
172
|
+
first_action = self.events[min_idx]
|
|
173
|
+
has_thinking = (
|
|
174
|
+
isinstance(first_action, ActionEvent)
|
|
175
|
+
and len(first_action.thinking_blocks) > 0
|
|
176
|
+
)
|
|
177
|
+
|
|
178
|
+
# Extend the range to include all corresponding observations
|
|
179
|
+
for action_idx in action_indices:
|
|
180
|
+
action_event = self.events[action_idx]
|
|
181
|
+
if (
|
|
182
|
+
isinstance(action_event, ActionEvent)
|
|
183
|
+
and action_event.tool_call_id is not None
|
|
184
|
+
):
|
|
185
|
+
if action_event.tool_call_id in observation_indices:
|
|
186
|
+
obs_idx = observation_indices[action_event.tool_call_id]
|
|
187
|
+
max_idx = max(max_idx, obs_idx)
|
|
188
|
+
|
|
189
|
+
batch_ranges.append((min_idx, max_idx, has_thinking))
|
|
190
|
+
|
|
191
|
+
# Sort batch ranges by start index for tool loop detection
|
|
192
|
+
batch_ranges.sort(key=lambda x: x[0])
|
|
193
|
+
|
|
194
|
+
# Identify tool loops: A tool loop starts with a batch that has thinking
|
|
195
|
+
# blocks and continues through all subsequent batches until we hit a
|
|
196
|
+
# non-ActionEvent/ObservationEvent (like a user MessageEvent).
|
|
197
|
+
tool_loop_ranges: list[tuple[int, int]] = []
|
|
198
|
+
if batch_ranges:
|
|
199
|
+
i = 0
|
|
200
|
+
while i < len(batch_ranges):
|
|
201
|
+
min_idx, max_idx, has_thinking = batch_ranges[i]
|
|
202
|
+
|
|
203
|
+
# If this batch has thinking blocks, start a tool loop
|
|
204
|
+
if has_thinking:
|
|
205
|
+
loop_start = min_idx
|
|
206
|
+
loop_end = max_idx
|
|
207
|
+
|
|
208
|
+
# Continue through ALL subsequent batches until we hit
|
|
209
|
+
# a non-batch event
|
|
210
|
+
j = i + 1
|
|
211
|
+
while j < len(batch_ranges):
|
|
212
|
+
next_min, next_max, _ = batch_ranges[j]
|
|
213
|
+
|
|
214
|
+
# Check if there's a non-batch event between current
|
|
215
|
+
# and next batch
|
|
216
|
+
has_non_batch_between = False
|
|
217
|
+
for k in range(loop_end + 1, next_min):
|
|
218
|
+
event = self.events[k]
|
|
219
|
+
if not isinstance(
|
|
220
|
+
event, (ActionEvent, ObservationBaseEvent)
|
|
221
|
+
):
|
|
222
|
+
has_non_batch_between = True
|
|
223
|
+
break
|
|
224
|
+
|
|
225
|
+
if has_non_batch_between:
|
|
226
|
+
# Tool loop ends before this non-batch event
|
|
227
|
+
break
|
|
228
|
+
|
|
229
|
+
# Include this batch in the tool loop
|
|
230
|
+
loop_end = max(loop_end, next_max)
|
|
231
|
+
j += 1
|
|
232
|
+
|
|
233
|
+
tool_loop_ranges.append((loop_start, loop_end))
|
|
234
|
+
i = j
|
|
235
|
+
else:
|
|
236
|
+
i += 1
|
|
237
|
+
|
|
238
|
+
# Merge batch ranges that are part of tool loops
|
|
239
|
+
# Create a mapping of batch index ranges to whether they're in a tool loop
|
|
240
|
+
merged_ranges: list[tuple[int, int]] = []
|
|
241
|
+
|
|
242
|
+
if tool_loop_ranges:
|
|
243
|
+
# Add tool loop ranges as atomic units
|
|
244
|
+
merged_ranges.extend(tool_loop_ranges)
|
|
245
|
+
|
|
246
|
+
# Add non-tool-loop batch ranges
|
|
247
|
+
tool_loop_indices = set()
|
|
248
|
+
for loop_start, loop_end in tool_loop_ranges:
|
|
249
|
+
tool_loop_indices.update(range(loop_start, loop_end + 1))
|
|
250
|
+
|
|
251
|
+
for min_idx, max_idx, has_thinking in batch_ranges:
|
|
252
|
+
# Only add if not already covered by a tool loop
|
|
253
|
+
if min_idx not in tool_loop_indices:
|
|
254
|
+
merged_ranges.append((min_idx, max_idx))
|
|
255
|
+
else:
|
|
256
|
+
# No tool loops, just use regular batch ranges
|
|
257
|
+
merged_ranges = [(min_idx, max_idx) for min_idx, max_idx, _ in batch_ranges]
|
|
258
|
+
|
|
259
|
+
# Start with all possible indices (subtractive approach)
|
|
260
|
+
result_indices = set(range(len(self.events) + 1))
|
|
261
|
+
|
|
262
|
+
# Remove indices inside merged ranges (keep only boundaries)
|
|
263
|
+
for min_idx, max_idx in merged_ranges:
|
|
264
|
+
# Remove interior indices, keeping min_idx and max_idx+1 as boundaries
|
|
265
|
+
for idx in range(min_idx + 1, max_idx + 1):
|
|
266
|
+
result_indices.discard(idx)
|
|
267
|
+
|
|
268
|
+
return sorted(result_indices)
|
|
269
|
+
|
|
270
|
+
# To preserve list-like indexing, we ideally support slicing and position-based
|
|
271
|
+
# indexing. The only challenge with that is switching the return type based on the
|
|
272
|
+
# input type -- we can mark the different signatures for MyPy with `@overload`
|
|
273
|
+
# decorators.
|
|
274
|
+
|
|
275
|
+
@overload
|
|
276
|
+
def __getitem__(self, key: slice) -> list[LLMConvertibleEvent]: ...
|
|
277
|
+
|
|
278
|
+
@overload
|
|
279
|
+
def __getitem__(self, key: int) -> LLMConvertibleEvent: ...
|
|
280
|
+
|
|
281
|
+
def __getitem__(
|
|
282
|
+
self, key: int | slice
|
|
283
|
+
) -> LLMConvertibleEvent | list[LLMConvertibleEvent]:
|
|
284
|
+
if isinstance(key, slice):
|
|
285
|
+
start, stop, step = key.indices(len(self))
|
|
286
|
+
return [self[i] for i in range(start, stop, step)]
|
|
287
|
+
elif isinstance(key, int):
|
|
288
|
+
return self.events[key]
|
|
289
|
+
else:
|
|
290
|
+
raise ValueError(f"Invalid key type: {type(key)}")
|
|
291
|
+
|
|
292
|
+
@staticmethod
|
|
293
|
+
def _enforce_batch_atomicity(
|
|
294
|
+
events: Sequence[Event],
|
|
295
|
+
removed_event_ids: set[EventID],
|
|
296
|
+
) -> set[EventID]:
|
|
297
|
+
"""Ensure that if any ActionEvent in a batch is removed, all ActionEvents
|
|
298
|
+
in that batch are removed.
|
|
299
|
+
|
|
300
|
+
This prevents partial batches from being sent to the LLM, which can cause
|
|
301
|
+
API errors when thinking blocks are separated from their tool calls.
|
|
302
|
+
|
|
303
|
+
Args:
|
|
304
|
+
events: The original list of events
|
|
305
|
+
removed_event_ids: Set of event IDs that are being removed
|
|
306
|
+
|
|
307
|
+
Returns:
|
|
308
|
+
Updated set of event IDs that should be removed (including all
|
|
309
|
+
ActionEvents in batches where any ActionEvent was removed)
|
|
310
|
+
"""
|
|
311
|
+
action_batch = ActionBatch.from_events(events)
|
|
312
|
+
|
|
313
|
+
if not action_batch.batches:
|
|
314
|
+
return removed_event_ids
|
|
315
|
+
|
|
316
|
+
updated_removed_ids = set(removed_event_ids)
|
|
317
|
+
|
|
318
|
+
for llm_response_id, batch_event_ids in action_batch.batches.items():
|
|
319
|
+
# Check if any ActionEvent in this batch is being removed
|
|
320
|
+
if any(event_id in removed_event_ids for event_id in batch_event_ids):
|
|
321
|
+
# If so, remove all ActionEvents in this batch
|
|
322
|
+
updated_removed_ids.update(batch_event_ids)
|
|
323
|
+
logger.debug(
|
|
324
|
+
f"Enforcing batch atomicity: removing entire batch "
|
|
325
|
+
f"with llm_response_id={llm_response_id} "
|
|
326
|
+
f"({len(batch_event_ids)} events)"
|
|
327
|
+
)
|
|
328
|
+
|
|
329
|
+
return updated_removed_ids
|
|
330
|
+
|
|
331
|
+
@staticmethod
|
|
332
|
+
def filter_unmatched_tool_calls(
|
|
333
|
+
events: list[LLMConvertibleEvent],
|
|
334
|
+
) -> list[LLMConvertibleEvent]:
|
|
335
|
+
"""Filter out unmatched tool call events.
|
|
336
|
+
|
|
337
|
+
Removes ActionEvents and ObservationEvents that have tool_call_ids
|
|
338
|
+
but don't have matching pairs. Also enforces batch atomicity - if any
|
|
339
|
+
ActionEvent in a batch is filtered out, all ActionEvents in that batch
|
|
340
|
+
are also filtered out.
|
|
341
|
+
"""
|
|
342
|
+
action_tool_call_ids = View._get_action_tool_call_ids(events)
|
|
343
|
+
observation_tool_call_ids = View._get_observation_tool_call_ids(events)
|
|
344
|
+
|
|
345
|
+
# Build batch info for batch atomicity enforcement
|
|
346
|
+
action_batch = ActionBatch.from_events(events)
|
|
347
|
+
|
|
348
|
+
# First pass: identify which events would NOT be kept based on matching
|
|
349
|
+
removed_event_ids: set[EventID] = set()
|
|
350
|
+
for event in events:
|
|
351
|
+
if not View._should_keep_event(
|
|
352
|
+
event, action_tool_call_ids, observation_tool_call_ids
|
|
353
|
+
):
|
|
354
|
+
removed_event_ids.add(event.id)
|
|
355
|
+
|
|
356
|
+
# Second pass: enforce batch atomicity for ActionEvents
|
|
357
|
+
# If any ActionEvent in a batch is removed, all ActionEvents in that
|
|
358
|
+
# batch should also be removed
|
|
359
|
+
removed_event_ids = View._enforce_batch_atomicity(events, removed_event_ids)
|
|
360
|
+
|
|
361
|
+
# Third pass: also remove ObservationEvents whose ActionEvents were removed
|
|
362
|
+
# due to batch atomicity
|
|
363
|
+
tool_call_ids_to_remove: set[ToolCallID] = set()
|
|
364
|
+
for action_id in removed_event_ids:
|
|
365
|
+
if action_id in action_batch.action_id_to_tool_call_id:
|
|
366
|
+
tool_call_ids_to_remove.add(
|
|
367
|
+
action_batch.action_id_to_tool_call_id[action_id]
|
|
368
|
+
)
|
|
369
|
+
|
|
370
|
+
# Filter out removed events
|
|
371
|
+
result = []
|
|
372
|
+
for event in events:
|
|
373
|
+
if event.id in removed_event_ids:
|
|
374
|
+
continue
|
|
375
|
+
if isinstance(event, ObservationBaseEvent):
|
|
376
|
+
if event.tool_call_id in tool_call_ids_to_remove:
|
|
377
|
+
continue
|
|
378
|
+
result.append(event)
|
|
379
|
+
|
|
380
|
+
return result
|
|
381
|
+
|
|
382
|
+
@staticmethod
|
|
383
|
+
def _get_action_tool_call_ids(events: list[LLMConvertibleEvent]) -> set[ToolCallID]:
|
|
384
|
+
"""Extract tool_call_ids from ActionEvents."""
|
|
385
|
+
tool_call_ids = set()
|
|
386
|
+
for event in events:
|
|
387
|
+
if isinstance(event, ActionEvent) and event.tool_call_id is not None:
|
|
388
|
+
tool_call_ids.add(event.tool_call_id)
|
|
389
|
+
return tool_call_ids
|
|
390
|
+
|
|
391
|
+
@staticmethod
|
|
392
|
+
def _get_observation_tool_call_ids(
|
|
393
|
+
events: list[LLMConvertibleEvent],
|
|
394
|
+
) -> set[ToolCallID]:
|
|
395
|
+
"""Extract tool_call_ids from ObservationEvents."""
|
|
396
|
+
tool_call_ids = set()
|
|
397
|
+
for event in events:
|
|
398
|
+
if (
|
|
399
|
+
isinstance(event, ObservationBaseEvent)
|
|
400
|
+
and event.tool_call_id is not None
|
|
401
|
+
):
|
|
402
|
+
tool_call_ids.add(event.tool_call_id)
|
|
403
|
+
return tool_call_ids
|
|
404
|
+
|
|
405
|
+
@staticmethod
|
|
406
|
+
def _should_keep_event(
|
|
407
|
+
event: LLMConvertibleEvent,
|
|
408
|
+
action_tool_call_ids: set[ToolCallID],
|
|
409
|
+
observation_tool_call_ids: set[ToolCallID],
|
|
410
|
+
) -> bool:
|
|
411
|
+
"""Determine if an event should be kept based on tool call matching."""
|
|
412
|
+
if isinstance(event, ObservationBaseEvent):
|
|
413
|
+
return event.tool_call_id in action_tool_call_ids
|
|
414
|
+
elif isinstance(event, ActionEvent):
|
|
415
|
+
return event.tool_call_id in observation_tool_call_ids
|
|
416
|
+
else:
|
|
417
|
+
return True
|
|
418
|
+
|
|
419
|
+
def find_next_manipulation_index(self, threshold: int, strict: bool = False) -> int:
|
|
420
|
+
"""Find the smallest manipulation index greater than (or equal to) a threshold.
|
|
421
|
+
|
|
422
|
+
This is a helper method for condensation logic that needs to find safe
|
|
423
|
+
boundaries for forgetting events. Uses the cached manipulation_indices property.
|
|
424
|
+
|
|
425
|
+
Args:
|
|
426
|
+
threshold: The threshold value to compare against
|
|
427
|
+
strict: If True, finds index > threshold. If False, finds index >= threshold
|
|
428
|
+
|
|
429
|
+
Returns:
|
|
430
|
+
The smallest manipulation index that satisfies the condition, or the
|
|
431
|
+
threshold itself if no such index exists
|
|
432
|
+
"""
|
|
433
|
+
for idx in self.manipulation_indices:
|
|
434
|
+
if strict:
|
|
435
|
+
if idx > threshold:
|
|
436
|
+
return idx
|
|
437
|
+
else:
|
|
438
|
+
if idx >= threshold:
|
|
439
|
+
return idx
|
|
440
|
+
return threshold
|
|
441
|
+
|
|
442
|
+
@staticmethod
def from_events(events: Sequence[Event]) -> View:
    """Build a View from raw events, honoring condensation semantics.

    Events forgotten by any Condensation (plus the condensation and
    request events themselves) are dropped, the most recent summary is
    spliced back in at its recorded offset, and unmatched tool calls are
    filtered from the result.
    """
    forgotten: set[EventID] = set()
    condensations: list[Condensation] = []
    for ev in events:
        if isinstance(ev, Condensation):
            condensations.append(ev)
            forgotten.update(ev.forgotten_event_ids)
            # The condensation action itself is never replayed to the LLM.
            forgotten.add(ev.id)
        if isinstance(ev, CondensationRequest):
            forgotten.add(ev.id)

    # A partially-forgotten multi-action batch would leave thinking blocks
    # separated from their tool calls, so such batches are forgotten whole.
    forgotten = View._enforce_batch_atomicity(events, forgotten)

    kept_events = [
        ev
        for ev in events
        if isinstance(ev, LLMConvertibleEvent) and ev.id not in forgotten
    ]

    # Splice in the summary from the most recent condensation carrying one.
    latest_with_summary = next(
        (
            ev
            for ev in reversed(events)
            if isinstance(ev, Condensation)
            and ev.summary is not None
            and ev.summary_offset is not None
        ),
        None,
    )
    if latest_with_summary is not None:
        summary_offset = latest_with_summary.summary_offset
        logger.debug(f"Inserting summary at offset {summary_offset}")
        kept_events.insert(
            summary_offset,
            CondensationSummaryEvent(summary=latest_with_summary.summary),
        )

    # A CondensationRequest that appears after every Condensation is still
    # waiting to be handled.
    unhandled_condensation_request = False
    for ev in reversed(events):
        if isinstance(ev, Condensation):
            break
        if isinstance(ev, CondensationRequest):
            unhandled_condensation_request = True
            break

    return View(
        events=View.filter_unmatched_tool_calls(kept_events),
        unhandled_condensation_request=unhandled_condensation_request,
        condensations=condensations,
    )
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
"""Public API of the conversation subpackage.

Re-exports the conversation implementations, state/status types, callback
type aliases, visualizers, and supporting utilities so callers can import
them from ``openhands.sdk.conversation`` directly.
"""

from openhands.sdk.conversation.base import BaseConversation
from openhands.sdk.conversation.conversation import Conversation
from openhands.sdk.conversation.event_store import EventLog
from openhands.sdk.conversation.events_list_base import EventsListBase
from openhands.sdk.conversation.impl.local_conversation import LocalConversation
from openhands.sdk.conversation.impl.remote_conversation import RemoteConversation
from openhands.sdk.conversation.response_utils import get_agent_final_response
from openhands.sdk.conversation.secret_registry import SecretRegistry
from openhands.sdk.conversation.state import (
    ConversationExecutionStatus,
    ConversationState,
)
from openhands.sdk.conversation.stuck_detector import StuckDetector
from openhands.sdk.conversation.types import (
    ConversationCallbackType,
    ConversationTokenCallbackType,
)
from openhands.sdk.conversation.visualizer import (
    ConversationVisualizerBase,
    DefaultConversationVisualizer,
)


# Names exported via ``from openhands.sdk.conversation import *``.
__all__ = [
    "Conversation",
    "BaseConversation",
    "ConversationState",
    "ConversationExecutionStatus",
    "ConversationCallbackType",
    "ConversationTokenCallbackType",
    "DefaultConversationVisualizer",
    "ConversationVisualizerBase",
    "SecretRegistry",
    "StuckDetector",
    "EventLog",
    "LocalConversation",
    "RemoteConversation",
    "EventsListBase",
    "get_agent_final_response",
]
|