openhands-sdk 1.7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openhands/sdk/__init__.py +111 -0
- openhands/sdk/agent/__init__.py +8 -0
- openhands/sdk/agent/agent.py +650 -0
- openhands/sdk/agent/base.py +457 -0
- openhands/sdk/agent/prompts/in_context_learning_example.j2 +169 -0
- openhands/sdk/agent/prompts/in_context_learning_example_suffix.j2 +3 -0
- openhands/sdk/agent/prompts/model_specific/anthropic_claude.j2 +3 -0
- openhands/sdk/agent/prompts/model_specific/google_gemini.j2 +1 -0
- openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5-codex.j2 +2 -0
- openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5.j2 +3 -0
- openhands/sdk/agent/prompts/security_policy.j2 +22 -0
- openhands/sdk/agent/prompts/security_risk_assessment.j2 +21 -0
- openhands/sdk/agent/prompts/self_documentation.j2 +15 -0
- openhands/sdk/agent/prompts/system_prompt.j2 +132 -0
- openhands/sdk/agent/prompts/system_prompt_interactive.j2 +14 -0
- openhands/sdk/agent/prompts/system_prompt_long_horizon.j2 +40 -0
- openhands/sdk/agent/prompts/system_prompt_planning.j2 +40 -0
- openhands/sdk/agent/prompts/system_prompt_tech_philosophy.j2 +122 -0
- openhands/sdk/agent/utils.py +228 -0
- openhands/sdk/context/__init__.py +28 -0
- openhands/sdk/context/agent_context.py +264 -0
- openhands/sdk/context/condenser/__init__.py +18 -0
- openhands/sdk/context/condenser/base.py +100 -0
- openhands/sdk/context/condenser/llm_summarizing_condenser.py +248 -0
- openhands/sdk/context/condenser/no_op_condenser.py +14 -0
- openhands/sdk/context/condenser/pipeline_condenser.py +56 -0
- openhands/sdk/context/condenser/prompts/summarizing_prompt.j2 +59 -0
- openhands/sdk/context/condenser/utils.py +149 -0
- openhands/sdk/context/prompts/__init__.py +6 -0
- openhands/sdk/context/prompts/prompt.py +114 -0
- openhands/sdk/context/prompts/templates/ask_agent_template.j2 +11 -0
- openhands/sdk/context/prompts/templates/skill_knowledge_info.j2 +8 -0
- openhands/sdk/context/prompts/templates/system_message_suffix.j2 +32 -0
- openhands/sdk/context/skills/__init__.py +28 -0
- openhands/sdk/context/skills/exceptions.py +11 -0
- openhands/sdk/context/skills/skill.py +720 -0
- openhands/sdk/context/skills/trigger.py +36 -0
- openhands/sdk/context/skills/types.py +48 -0
- openhands/sdk/context/view.py +503 -0
- openhands/sdk/conversation/__init__.py +40 -0
- openhands/sdk/conversation/base.py +281 -0
- openhands/sdk/conversation/conversation.py +152 -0
- openhands/sdk/conversation/conversation_stats.py +85 -0
- openhands/sdk/conversation/event_store.py +157 -0
- openhands/sdk/conversation/events_list_base.py +17 -0
- openhands/sdk/conversation/exceptions.py +50 -0
- openhands/sdk/conversation/fifo_lock.py +133 -0
- openhands/sdk/conversation/impl/__init__.py +5 -0
- openhands/sdk/conversation/impl/local_conversation.py +665 -0
- openhands/sdk/conversation/impl/remote_conversation.py +956 -0
- openhands/sdk/conversation/persistence_const.py +9 -0
- openhands/sdk/conversation/response_utils.py +41 -0
- openhands/sdk/conversation/secret_registry.py +126 -0
- openhands/sdk/conversation/serialization_diff.py +0 -0
- openhands/sdk/conversation/state.py +392 -0
- openhands/sdk/conversation/stuck_detector.py +311 -0
- openhands/sdk/conversation/title_utils.py +191 -0
- openhands/sdk/conversation/types.py +45 -0
- openhands/sdk/conversation/visualizer/__init__.py +12 -0
- openhands/sdk/conversation/visualizer/base.py +67 -0
- openhands/sdk/conversation/visualizer/default.py +373 -0
- openhands/sdk/critic/__init__.py +15 -0
- openhands/sdk/critic/base.py +38 -0
- openhands/sdk/critic/impl/__init__.py +12 -0
- openhands/sdk/critic/impl/agent_finished.py +83 -0
- openhands/sdk/critic/impl/empty_patch.py +49 -0
- openhands/sdk/critic/impl/pass_critic.py +42 -0
- openhands/sdk/event/__init__.py +42 -0
- openhands/sdk/event/base.py +149 -0
- openhands/sdk/event/condenser.py +82 -0
- openhands/sdk/event/conversation_error.py +25 -0
- openhands/sdk/event/conversation_state.py +104 -0
- openhands/sdk/event/llm_completion_log.py +39 -0
- openhands/sdk/event/llm_convertible/__init__.py +20 -0
- openhands/sdk/event/llm_convertible/action.py +139 -0
- openhands/sdk/event/llm_convertible/message.py +142 -0
- openhands/sdk/event/llm_convertible/observation.py +141 -0
- openhands/sdk/event/llm_convertible/system.py +61 -0
- openhands/sdk/event/token.py +16 -0
- openhands/sdk/event/types.py +11 -0
- openhands/sdk/event/user_action.py +21 -0
- openhands/sdk/git/exceptions.py +43 -0
- openhands/sdk/git/git_changes.py +249 -0
- openhands/sdk/git/git_diff.py +129 -0
- openhands/sdk/git/models.py +21 -0
- openhands/sdk/git/utils.py +189 -0
- openhands/sdk/hooks/__init__.py +30 -0
- openhands/sdk/hooks/config.py +180 -0
- openhands/sdk/hooks/conversation_hooks.py +227 -0
- openhands/sdk/hooks/executor.py +155 -0
- openhands/sdk/hooks/manager.py +170 -0
- openhands/sdk/hooks/types.py +40 -0
- openhands/sdk/io/__init__.py +6 -0
- openhands/sdk/io/base.py +48 -0
- openhands/sdk/io/cache.py +85 -0
- openhands/sdk/io/local.py +119 -0
- openhands/sdk/io/memory.py +54 -0
- openhands/sdk/llm/__init__.py +45 -0
- openhands/sdk/llm/exceptions/__init__.py +45 -0
- openhands/sdk/llm/exceptions/classifier.py +50 -0
- openhands/sdk/llm/exceptions/mapping.py +54 -0
- openhands/sdk/llm/exceptions/types.py +101 -0
- openhands/sdk/llm/llm.py +1140 -0
- openhands/sdk/llm/llm_registry.py +122 -0
- openhands/sdk/llm/llm_response.py +59 -0
- openhands/sdk/llm/message.py +656 -0
- openhands/sdk/llm/mixins/fn_call_converter.py +1288 -0
- openhands/sdk/llm/mixins/non_native_fc.py +97 -0
- openhands/sdk/llm/options/__init__.py +1 -0
- openhands/sdk/llm/options/chat_options.py +93 -0
- openhands/sdk/llm/options/common.py +19 -0
- openhands/sdk/llm/options/responses_options.py +67 -0
- openhands/sdk/llm/router/__init__.py +10 -0
- openhands/sdk/llm/router/base.py +117 -0
- openhands/sdk/llm/router/impl/multimodal.py +76 -0
- openhands/sdk/llm/router/impl/random.py +22 -0
- openhands/sdk/llm/streaming.py +9 -0
- openhands/sdk/llm/utils/metrics.py +312 -0
- openhands/sdk/llm/utils/model_features.py +192 -0
- openhands/sdk/llm/utils/model_info.py +90 -0
- openhands/sdk/llm/utils/model_prompt_spec.py +98 -0
- openhands/sdk/llm/utils/retry_mixin.py +128 -0
- openhands/sdk/llm/utils/telemetry.py +362 -0
- openhands/sdk/llm/utils/unverified_models.py +156 -0
- openhands/sdk/llm/utils/verified_models.py +65 -0
- openhands/sdk/logger/__init__.py +22 -0
- openhands/sdk/logger/logger.py +195 -0
- openhands/sdk/logger/rolling.py +113 -0
- openhands/sdk/mcp/__init__.py +24 -0
- openhands/sdk/mcp/client.py +76 -0
- openhands/sdk/mcp/definition.py +106 -0
- openhands/sdk/mcp/exceptions.py +19 -0
- openhands/sdk/mcp/tool.py +270 -0
- openhands/sdk/mcp/utils.py +83 -0
- openhands/sdk/observability/__init__.py +4 -0
- openhands/sdk/observability/laminar.py +166 -0
- openhands/sdk/observability/utils.py +20 -0
- openhands/sdk/py.typed +0 -0
- openhands/sdk/secret/__init__.py +19 -0
- openhands/sdk/secret/secrets.py +92 -0
- openhands/sdk/security/__init__.py +6 -0
- openhands/sdk/security/analyzer.py +111 -0
- openhands/sdk/security/confirmation_policy.py +61 -0
- openhands/sdk/security/llm_analyzer.py +29 -0
- openhands/sdk/security/risk.py +100 -0
- openhands/sdk/tool/__init__.py +34 -0
- openhands/sdk/tool/builtins/__init__.py +34 -0
- openhands/sdk/tool/builtins/finish.py +106 -0
- openhands/sdk/tool/builtins/think.py +117 -0
- openhands/sdk/tool/registry.py +184 -0
- openhands/sdk/tool/schema.py +286 -0
- openhands/sdk/tool/spec.py +39 -0
- openhands/sdk/tool/tool.py +481 -0
- openhands/sdk/utils/__init__.py +22 -0
- openhands/sdk/utils/async_executor.py +115 -0
- openhands/sdk/utils/async_utils.py +39 -0
- openhands/sdk/utils/cipher.py +68 -0
- openhands/sdk/utils/command.py +90 -0
- openhands/sdk/utils/deprecation.py +166 -0
- openhands/sdk/utils/github.py +44 -0
- openhands/sdk/utils/json.py +48 -0
- openhands/sdk/utils/models.py +570 -0
- openhands/sdk/utils/paging.py +63 -0
- openhands/sdk/utils/pydantic_diff.py +85 -0
- openhands/sdk/utils/pydantic_secrets.py +64 -0
- openhands/sdk/utils/truncate.py +117 -0
- openhands/sdk/utils/visualize.py +58 -0
- openhands/sdk/workspace/__init__.py +17 -0
- openhands/sdk/workspace/base.py +158 -0
- openhands/sdk/workspace/local.py +189 -0
- openhands/sdk/workspace/models.py +35 -0
- openhands/sdk/workspace/remote/__init__.py +8 -0
- openhands/sdk/workspace/remote/async_remote_workspace.py +149 -0
- openhands/sdk/workspace/remote/base.py +164 -0
- openhands/sdk/workspace/remote/remote_workspace_mixin.py +323 -0
- openhands/sdk/workspace/workspace.py +49 -0
- openhands_sdk-1.7.3.dist-info/METADATA +17 -0
- openhands_sdk-1.7.3.dist-info/RECORD +180 -0
- openhands_sdk-1.7.3.dist-info/WHEEL +5 -0
- openhands_sdk-1.7.3.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,264 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import pathlib
|
|
4
|
+
from collections.abc import Mapping
|
|
5
|
+
|
|
6
|
+
from pydantic import BaseModel, Field, field_validator, model_validator
|
|
7
|
+
|
|
8
|
+
from openhands.sdk.context.prompts import render_template
|
|
9
|
+
from openhands.sdk.context.skills import (
|
|
10
|
+
Skill,
|
|
11
|
+
SkillKnowledge,
|
|
12
|
+
load_public_skills,
|
|
13
|
+
load_user_skills,
|
|
14
|
+
)
|
|
15
|
+
from openhands.sdk.llm import Message, TextContent
|
|
16
|
+
from openhands.sdk.llm.utils.model_prompt_spec import get_model_prompt_spec
|
|
17
|
+
from openhands.sdk.logger import get_logger
|
|
18
|
+
from openhands.sdk.secret import SecretSource, SecretValue
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
logger = get_logger(__name__)
|
|
22
|
+
|
|
23
|
+
PROMPT_DIR = pathlib.Path(__file__).parent / "prompts" / "templates"
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class AgentContext(BaseModel):
    """Central structure for managing prompt extension.

    AgentContext unifies all the contextual inputs that shape how the system
    extends and interprets user prompts. It combines both static environment
    details and dynamic, user-activated extensions from skills.

    Specifically, it provides:
    - **Repository context / Repo Skills**: Information about the active codebase,
      branches, and repo-specific instructions contributed by repo skills.
    - **Runtime context**: Current execution environment (hosts, working
      directory, secrets, date, etc.).
    - **Conversation instructions**: Optional task- or channel-specific rules
      that constrain or guide the agent's behavior across the session.
    - **Knowledge Skills**: Extensible components that can be triggered by user input
      to inject knowledge or domain-specific guidance.

    Together, these elements make AgentContext the primary container responsible
    for assembling, formatting, and injecting all prompt-relevant context into
    LLM interactions.
    """  # noqa: E501

    skills: list[Skill] = Field(
        default_factory=list,
        description="List of available skills that can extend the user's input.",
    )
    system_message_suffix: str | None = Field(
        default=None, description="Optional suffix to append to the system prompt."
    )
    user_message_suffix: str | None = Field(
        default=None, description="Optional suffix to append to the user's message."
    )
    load_user_skills: bool = Field(
        default=False,
        description=(
            "Whether to automatically load user skills from ~/.openhands/skills/ "
            "and ~/.openhands/microagents/ (for backward compatibility). "
        ),
    )
    load_public_skills: bool = Field(
        default=False,
        description=(
            "Whether to automatically load skills from the public OpenHands "
            "skills repository at https://github.com/OpenHands/skills. "
            "This allows you to get the latest skills without SDK updates."
        ),
    )
    secrets: Mapping[str, SecretValue] | None = Field(
        default=None,
        description=(
            "Dictionary mapping secret keys to values or secret sources. "
            "Secrets are used for authentication and sensitive data handling. "
            "Values can be either strings or SecretSource instances "
            "(str | SecretSource)."
        ),
    )

    @field_validator("skills")
    @classmethod
    def _validate_skills(cls, v: list[Skill], _info):
        """Reject skill lists that contain duplicate skill names."""
        if not v:
            return v
        # Check for duplicate skill names
        seen_names: set[str] = set()
        for skill in v:
            if skill.name in seen_names:
                raise ValueError(f"Duplicate skill name found: {skill.name}")
            seen_names.add(skill.name)
        return v

    @model_validator(mode="after")
    def _load_user_skills(self):
        """Load user skills from home directory if enabled.

        Loading is best-effort: failures are logged as warnings, never raised,
        so a broken local skill directory cannot prevent context construction.
        """
        if not self.load_user_skills:
            return self

        try:
            user_skills = load_user_skills()
            # Merge user skills with explicit skills, avoiding duplicates
            existing_names = {skill.name for skill in self.skills}
            for user_skill in user_skills:
                if user_skill.name not in existing_names:
                    self.skills.append(user_skill)
                else:
                    logger.warning(
                        f"Skipping user skill '{user_skill.name}' "
                        f"(already in explicit skills)"
                    )
        except Exception as e:
            logger.warning(f"Failed to load user skills: {str(e)}")

        return self

    @model_validator(mode="after")
    def _load_public_skills(self):
        """Load public skills from OpenHands skills repository if enabled.

        Like user-skill loading, this is best-effort and never raises.
        """
        if not self.load_public_skills:
            return self
        try:
            public_skills = load_public_skills()
            # Merge public skills with explicit skills, avoiding duplicates
            existing_names = {skill.name for skill in self.skills}
            for public_skill in public_skills:
                if public_skill.name not in existing_names:
                    self.skills.append(public_skill)
                else:
                    logger.warning(
                        f"Skipping public skill '{public_skill.name}' "
                        f"(already in existing skills)"
                    )
        except Exception as e:
            logger.warning(f"Failed to load public skills: {str(e)}")
        return self

    def get_secret_infos(self) -> list[dict[str, str | None]]:
        """Get secret information (name and description) from the secrets field.

        Returns:
            List of dictionaries with 'name' and 'description' keys.
            Returns an empty list if no secrets are configured.
            Description will be None if not available.
        """
        if not self.secrets:
            return []
        # Annotation fixed: descriptions are None for plain string secrets, so
        # the value type is str | None (the docstring already said so).
        secret_infos: list[dict[str, str | None]] = []
        for name, secret_value in self.secrets.items():
            # Only SecretSource instances carry a human-readable description.
            description = None
            if isinstance(secret_value, SecretSource):
                description = secret_value.description
            secret_infos.append({"name": name, "description": description})
        return secret_infos

    def get_system_message_suffix(
        self,
        llm_model: str | None = None,
        llm_model_canonical: str | None = None,
    ) -> str | None:
        """Get the system message with repo skill content and custom suffix.

        Custom suffix can typically includes:
        - Repository information (repo name, branch name, PR number, etc.)
        - Runtime information (e.g., available hosts, current date)
        - Conversation instructions (e.g., user preferences, task details)
        - Repository-specific instructions (collected from repo skills)
        """
        # Repo skills are those without a trigger: always-on instructions.
        repo_skills = [s for s in self.skills if s.trigger is None]

        # Gate vendor-specific repo skills based on model family.
        if llm_model or llm_model_canonical:
            spec = get_model_prompt_spec(llm_model or "", llm_model_canonical)
            family = (spec.family or "").lower()
            if family:
                filtered: list[Skill] = []
                for s in repo_skills:
                    n = (s.name or "").lower()
                    # A skill literally named "claude"/"gemini" only applies
                    # to the matching model family.
                    if n == "claude" and not (
                        "anthropic" in family or "claude" in family
                    ):
                        continue
                    if n == "gemini" and not (
                        "gemini" in family or "google_gemini" in family
                    ):
                        continue
                    filtered.append(s)
                repo_skills = filtered

        logger.debug(f"Triggered {len(repo_skills)} repository skills: {repo_skills}")
        # Build the workspace context information
        secret_infos = self.get_secret_infos()
        if repo_skills or self.system_message_suffix or secret_infos:
            formatted_text = render_template(
                prompt_dir=str(PROMPT_DIR),
                template_name="system_message_suffix.j2",
                repo_skills=repo_skills,
                system_message_suffix=self.system_message_suffix or "",
                secret_infos=secret_infos,
            ).strip()
            return formatted_text
        elif self.system_message_suffix and self.system_message_suffix.strip():
            # NOTE(review): this branch appears unreachable — a non-empty
            # system_message_suffix already satisfies the condition above.
            # Kept for safety; confirm before removing.
            return self.system_message_suffix.strip()
        return None

    def get_user_message_suffix(
        self, user_message: Message, skip_skill_names: list[str]
    ) -> tuple[TextContent, list[str]] | None:
        """Augment the user's message with knowledge recalled from skills.

        This works by:
        - Extracting the text content of the user message
        - Matching skill triggers against the query
        - Returning formatted knowledge and triggered skill names if relevant skills were triggered

        Returns None when there is nothing to append (no suffix configured and
        no skills triggered).
        """  # noqa: E501

        user_message_suffix = None
        if self.user_message_suffix and self.user_message_suffix.strip():
            user_message_suffix = self.user_message_suffix.strip()

        query = "\n".join(
            c.text for c in user_message.content if isinstance(c, TextContent)
        ).strip()
        recalled_knowledge: list[SkillKnowledge] = []
        # skip empty queries, but still return user_message_suffix if it exists
        if not query:
            if user_message_suffix:
                return TextContent(text=user_message_suffix), []
            return None
        # Search for skill triggers in the query
        for skill in self.skills:
            if not isinstance(skill, Skill):
                continue
            trigger = skill.match_trigger(query)
            if trigger and skill.name not in skip_skill_names:
                logger.info(
                    "Skill '%s' triggered by keyword '%s'",
                    skill.name,
                    trigger,
                )
                recalled_knowledge.append(
                    SkillKnowledge(
                        name=skill.name,
                        trigger=trigger,
                        content=skill.content,
                    )
                )
        if recalled_knowledge:
            formatted_skill_text = render_template(
                prompt_dir=str(PROMPT_DIR),
                template_name="skill_knowledge_info.j2",
                triggered_agents=recalled_knowledge,
            )
            if user_message_suffix:
                formatted_skill_text += "\n" + user_message_suffix
            return TextContent(text=formatted_skill_text), [
                k.name for k in recalled_knowledge
            ]

        if user_message_suffix:
            return TextContent(text=user_message_suffix), []
        return None
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
"""Public exports for the condenser subpackage."""

from openhands.sdk.context.condenser.base import (
    CondenserBase,
    RollingCondenser,
)
from openhands.sdk.context.condenser.llm_summarizing_condenser import (
    LLMSummarizingCondenser,
)
from openhands.sdk.context.condenser.no_op_condenser import NoOpCondenser
from openhands.sdk.context.condenser.pipeline_condenser import PipelineCondenser


__all__ = [
    "CondenserBase",
    "RollingCondenser",
    "NoOpCondenser",
    "PipelineCondenser",
    "LLMSummarizingCondenser",
]
|
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
from logging import getLogger
|
|
3
|
+
|
|
4
|
+
from openhands.sdk.context.view import View
|
|
5
|
+
from openhands.sdk.event.condenser import Condensation
|
|
6
|
+
from openhands.sdk.llm import LLM
|
|
7
|
+
from openhands.sdk.utils.models import (
|
|
8
|
+
DiscriminatedUnionMixin,
|
|
9
|
+
)
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
logger = getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class CondenserBase(DiscriminatedUnionMixin, ABC):
    """Abstract interface for event-history condensers.

    A condenser reduces a list of `Event` objects into a potentially smaller
    list, letting agents work with a compact history instead of the full one.
    Agents obtain the condensed history via `condensed_history` on the current
    `State` rather than consuming the raw event stream.

    When `condense` yields a `Condensation` (rather than a `View`), the agent
    should emit `Condensation.action` instead of its own action; on the
    following step the condenser turns that condensation event into a fresh
    `View`.
    """

    @abstractmethod
    def condense(self, view: View, agent_llm: LLM | None = None) -> View | Condensation:
        """Reduce a sequence of events into a potentially smaller list.

        Concrete strategies override this with their own condensation logic,
        calling `self.add_metadata` to record per-condensation diagnostics.

        Args:
            view: A view of the history containing all events that should be condensed.
            agent_llm: LLM instance used by the agent. Condensers use this for
                token counting purposes. Defaults to None.

        Returns:
            View | Condensation: Either a condensed view of the events, or an
            event recording that the history was condensed.
        """

    def handles_condensation_requests(self) -> bool:
        """Whether this condenser reacts to explicit condensation requests.

        Returning True means the agent invokes this condenser whenever a
        CondensationRequest event enters the history; returning False means it
        only runs when the agent's own logic triggers it (e.g. context window
        exceeded).

        Returns:
            bool: True if the condenser handles explicit condensation requests,
            False otherwise.
        """
        return False
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
class PipelinableCondenserBase(CondenserBase):
    """Abstract base for condensers that may be used as pipeline stages.

    Exists because a pipeline condenser should not nest another pipeline
    condenser; only subclasses of this marker are eligible for pipelining.
    """
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
class RollingCondenser(PipelinableCondenserBase, ABC):
    """Specialized condenser strategy applied to a rolling history.

    `View.from_events` analyzes every event in the history and builds the
    `View` that will be sent to the LLM. When `should_condense` reports that
    this view is too large, the condenser produces a `Condensation` from it.
    That condensation is appended to the event history so that a subsequent
    `get_view` yields the condensed `View` for the LLM.
    """

    @abstractmethod
    def should_condense(self, view: View, agent_llm: LLM | None = None) -> bool:
        """Determine if a view should be condensed."""

    @abstractmethod
    def get_condensation(
        self, view: View, agent_llm: LLM | None = None
    ) -> Condensation:
        """Get the condensation from a view."""

    def condense(self, view: View, agent_llm: LLM | None = None) -> View | Condensation:
        """Return the view unchanged, or a condensation if limits are hit."""
        # Below the condenser-specific threshold the view passes through as-is.
        if not self.should_condense(view, agent_llm=agent_llm):
            return view

        # Threshold exceeded: compute and return the condensation instead.
        return self.get_condensation(view, agent_llm=agent_llm)
|
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from collections.abc import Sequence
|
|
3
|
+
from enum import Enum
|
|
4
|
+
|
|
5
|
+
from pydantic import Field, model_validator
|
|
6
|
+
|
|
7
|
+
from openhands.sdk.context.condenser.base import RollingCondenser
|
|
8
|
+
from openhands.sdk.context.condenser.utils import (
|
|
9
|
+
get_suffix_length_for_token_reduction,
|
|
10
|
+
get_total_token_count,
|
|
11
|
+
)
|
|
12
|
+
from openhands.sdk.context.prompts import render_template
|
|
13
|
+
from openhands.sdk.context.view import View
|
|
14
|
+
from openhands.sdk.event.base import LLMConvertibleEvent
|
|
15
|
+
from openhands.sdk.event.condenser import Condensation
|
|
16
|
+
from openhands.sdk.event.llm_convertible import MessageEvent
|
|
17
|
+
from openhands.sdk.llm import LLM, Message, TextContent
|
|
18
|
+
from openhands.sdk.observability.laminar import observe
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class Reason(Enum):
    """Why a condensation was triggered."""

    # An explicit condensation request appeared in the event stream.
    REQUEST = "request"
    # The configured token budget (max_tokens) was exceeded.
    TOKENS = "tokens"
    # The configured event-count budget (max_size) was exceeded.
    EVENTS = "events"
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class LLMSummarizingCondenser(RollingCondenser):
|
|
30
|
+
"""LLM-based condenser that summarizes forgotten events.
|
|
31
|
+
|
|
32
|
+
Uses an independent LLM (stored in the `llm` attribute) for generating summaries
|
|
33
|
+
of forgotten events. The optional `agent_llm` parameter passed to condense() is
|
|
34
|
+
the LLM used by the agent for token counting purposes, and you should not assume
|
|
35
|
+
it is the same as the one defined in this condenser.
|
|
36
|
+
"""
|
|
37
|
+
|
|
38
|
+
llm: LLM
|
|
39
|
+
max_size: int = Field(default=240, gt=0)
|
|
40
|
+
max_tokens: int | None = None
|
|
41
|
+
keep_first: int = Field(default=2, ge=0)
|
|
42
|
+
|
|
43
|
+
@model_validator(mode="after")
def validate_keep_first_vs_max_size(self):
    """Ensure keep_first leaves room for new events in the halved view."""
    # Events that can survive at the tail after condensing to max_size // 2.
    tail_budget = self.max_size // 2 - self.keep_first - 1
    if tail_budget <= 0:
        raise ValueError(
            "keep_first must be less than max_size // 2 to leave room for "
            "condensation"
        )
    return self
|
|
52
|
+
|
|
53
|
+
def handles_condensation_requests(self) -> bool:
    """This condenser reacts to explicit CondensationRequest events."""
    return True
|
|
55
|
+
|
|
56
|
+
def get_condensation_reasons(
    self, view: View, agent_llm: LLM | None = None
) -> set[Reason]:
    """Determine the reasons why the view should be condensed.

    Args:
        view: The current view to evaluate.
        agent_llm: The LLM used by the agent. Required if token counting is needed.

    Returns:
        A set of Reason enums indicating why condensation is needed.
    """
    triggered: set[Reason] = set()

    # Reason 1: an unhandled condensation request — the view flags these while
    # processing the event stream.
    if view.unhandled_condensation_request:
        triggered.add(Reason.REQUEST)

    # Reason 2: a token limit is configured and exceeded. Counting tokens
    # requires the agent's LLM.
    if self.max_tokens and agent_llm:
        if get_total_token_count(view.events, agent_llm) > self.max_tokens:
            triggered.add(Reason.TOKENS)

    # Reason 3: the view holds more events than the configured maximum.
    if len(view) > self.max_size:
        triggered.add(Reason.EVENTS)

    return triggered
|
|
86
|
+
|
|
87
|
+
def should_condense(self, view: View, agent_llm: LLM | None = None) -> bool:
    """Condense whenever at least one condensation reason applies."""
    return bool(self.get_condensation_reasons(view, agent_llm))
|
|
90
|
+
|
|
91
|
+
def _get_summary_event_content(self, view: View) -> str:
    """Extract the text content from the summary event in the view, if any.

    If there is no summary event or it does not contain text content, returns
    an empty string.
    """
    summary_event_content: str = ""

    summary_event = view.summary_event
    if isinstance(summary_event, MessageEvent):
        # Guard against an empty content list: the original indexed [0]
        # unconditionally, which raised IndexError instead of returning ""
        # as this method's contract promises.
        contents = summary_event.llm_message.content
        if contents and isinstance(contents[0], TextContent):
            summary_event_content = contents[0].text

    return summary_event_content
|
|
106
|
+
|
|
107
|
+
def _generate_condensation(
    self,
    summary_event_content: str,
    forgotten_events: Sequence[LLMConvertibleEvent],
    summary_offset: int,
) -> Condensation:
    """Summarize forgotten events with the condenser's LLM.

    Args:
        summary_event_content: The content of the previous summary event.
        forgotten_events: The list of events to be summarized.
        summary_offset: The index where the summary event should be inserted.

    Returns:
        Condensation: The generated condensation object.

    Raises:
        ValueError: If forgotten_events is empty (0 events to condense).
    """
    if not forgotten_events:
        raise ValueError(
            "Cannot condense 0 events. This typically occurs when a tool loop "
            "spans almost the entire view, leaving no valid range for forgetting "
            "events. Consider adjusting keep_first or max_size parameters."
        )

    # The summarizing template consumes plain-text renderings of the events.
    rendered_events = [str(event) for event in forgotten_events]

    prompt = render_template(
        os.path.join(os.path.dirname(__file__), "prompts"),
        "summarizing_prompt.j2",
        previous_summary=summary_event_content,
        events=rendered_events,
    )

    # Do not pass extra_body explicitly. The LLM handles forwarding
    # litellm_extra_body only when it is non-empty.
    llm_response = self.llm.completion(
        messages=[Message(role="user", content=[TextContent(text=prompt)])],
    )

    # Pull the summary text out of the LLMResponse message, if any.
    summary = None
    contents = llm_response.message.content
    if contents and isinstance(contents[0], TextContent):
        summary = contents[0].text

    return Condensation(
        forgotten_event_ids=[event.id for event in forgotten_events],
        summary=summary,
        summary_offset=summary_offset,
        llm_response_id=llm_response.id,
    )
|
|
164
|
+
|
|
165
|
+
def _get_forgotten_events(
    self, view: View, agent_llm: LLM | None = None
) -> tuple[Sequence[LLMConvertibleEvent], int]:
    """Select the events to drop and where the summary should be inserted.

    Each active condensation reason contributes a candidate suffix length
    (how many trailing events may survive); the smallest candidate wins so
    that every resource constraint is satisfied at once. Manipulation
    indices then snap the naive forgetting range onto atomic-unit
    boundaries so no atomic group of events is split.

    Args:
        view: The view whose events are being considered for condensation.
        agent_llm: Agent-side LLM; required whenever token accounting applies.

    Returns:
        A ``(forgotten_events, summary_offset)`` pair.
    """
    reasons = self.get_condensation_reasons(view, agent_llm=agent_llm)
    assert reasons != set(), "No condensation reasons found."

    candidate_suffix_lengths: set[int] = set()

    if Reason.REQUEST in reasons:
        # Explicit condensation request: aim for half the current view size.
        candidate_suffix_lengths.add(len(view) // 2 - self.keep_first - 1)

    if Reason.EVENTS in reasons:
        # Event-count constraint: aim for half of the configured max_size.
        candidate_suffix_lengths.add(self.max_size // 2 - self.keep_first - 1)

    if Reason.TOKENS in reasons:
        # Token constraint: shed enough events to land under half of
        # max_tokens. Reason.TOKENS implies both max_tokens and the agent
        # LLM are present, so these asserts only restate invariants.
        assert self.max_tokens is not None
        assert agent_llm is not None

        budget = self.max_tokens // 2
        excess = get_total_token_count(view.events, agent_llm) - budget
        candidate_suffix_lengths.add(
            get_suffix_length_for_token_reduction(
                events=view.events[self.keep_first :],
                llm=agent_llm,
                token_reduction=excess,
            )
        )

    # Several reasons may be active at once; the strictest (smallest)
    # suffix satisfies all resource constraints simultaneously.
    keep_from_tail = min(candidate_suffix_lengths)

    # Naive end of the forgetting range, ignoring atomic boundaries.
    tentative_end = len(view) - keep_from_tail

    # Snap both edges onto manipulation indices: start is the smallest
    # index strictly past keep_first, end the smallest index at or past
    # the tentative end.
    forgetting_start = view.find_next_manipulation_index(
        self.keep_first, strict=True
    )
    forgetting_end = view.find_next_manipulation_index(tentative_end, strict=False)

    # The summary replaces the forgotten span, so its offset is the start.
    return view[forgetting_start:forgetting_end], forgetting_start
|
|
232
|
+
|
|
233
|
+
@observe(ignore_inputs=["view", "agent_llm"])
def get_condensation(
    self, view: View, agent_llm: LLM | None = None
) -> Condensation:
    """Produce a :class:`Condensation` for the given view.

    Gathers the two inputs the summarization depends on — the previous
    summary content (if any) and the events slated for removal — then
    delegates to the LLM-backed generator.
    """
    previous_summary = self._get_summary_event_content(view)
    to_forget, offset = self._get_forgotten_events(view, agent_llm=agent_llm)
    return self._generate_condensation(
        summary_event_content=previous_summary,
        forgotten_events=to_forget,
        summary_offset=offset,
    )
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
from openhands.sdk.context.condenser.base import CondenserBase
|
|
2
|
+
from openhands.sdk.context.view import View
|
|
3
|
+
from openhands.sdk.event.condenser import Condensation
|
|
4
|
+
from openhands.sdk.llm import LLM
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class NoOpCondenser(CondenserBase):
    """Identity condenser: hands the view back exactly as received.

    Useful as a default or stand-in when no summarization is desired,
    primarily in tests.
    """

    def condense(self, view: View, agent_llm: LLM | None = None) -> View | Condensation:  # noqa: ARG002
        # Pass-through: no events are dropped, summarized, or reordered.
        return view
|