openhands-sdk 1.7.4__py3-none-any.whl → 1.8.0__py3-none-any.whl
This diff compares two publicly released versions of the package as published to their public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
- openhands/sdk/__init__.py +2 -0
- openhands/sdk/agent/agent.py +27 -0
- openhands/sdk/agent/base.py +88 -82
- openhands/sdk/agent/prompts/system_prompt.j2 +1 -1
- openhands/sdk/agent/utils.py +3 -0
- openhands/sdk/context/agent_context.py +45 -3
- openhands/sdk/context/prompts/templates/skill_knowledge_info.j2 +4 -0
- openhands/sdk/context/prompts/templates/system_message_suffix.j2 +9 -0
- openhands/sdk/context/skills/__init__.py +12 -0
- openhands/sdk/context/skills/skill.py +275 -296
- openhands/sdk/context/skills/types.py +4 -0
- openhands/sdk/context/skills/utils.py +442 -0
- openhands/sdk/conversation/impl/local_conversation.py +42 -14
- openhands/sdk/conversation/state.py +52 -20
- openhands/sdk/event/llm_convertible/action.py +20 -0
- openhands/sdk/git/utils.py +31 -6
- openhands/sdk/hooks/conversation_hooks.py +57 -10
- openhands/sdk/llm/llm.py +58 -74
- openhands/sdk/llm/router/base.py +12 -0
- openhands/sdk/llm/utils/telemetry.py +2 -2
- openhands/sdk/plugin/__init__.py +22 -0
- openhands/sdk/plugin/plugin.py +299 -0
- openhands/sdk/plugin/types.py +226 -0
- openhands/sdk/tool/__init__.py +7 -1
- openhands/sdk/tool/builtins/__init__.py +4 -0
- openhands/sdk/tool/tool.py +60 -9
- openhands/sdk/workspace/remote/async_remote_workspace.py +16 -0
- openhands/sdk/workspace/remote/base.py +16 -0
- {openhands_sdk-1.7.4.dist-info → openhands_sdk-1.8.0.dist-info}/METADATA +1 -1
- {openhands_sdk-1.7.4.dist-info → openhands_sdk-1.8.0.dist-info}/RECORD +32 -28
- {openhands_sdk-1.7.4.dist-info → openhands_sdk-1.8.0.dist-info}/WHEEL +0 -0
- {openhands_sdk-1.7.4.dist-info → openhands_sdk-1.8.0.dist-info}/top_level.txt +0 -0

openhands/sdk/context/skills/types.py

@@ -16,6 +16,10 @@ class SkillKnowledge(BaseModel):
     name: str = Field(description="The name of the skill that was triggered")
     trigger: str = Field(description="The word that triggered this skill")
     content: str = Field(description="The actual content/knowledge from the skill")
+    location: str | None = Field(
+        default=None,
+        description="Path to the SKILL.md file (for resolving relative resource paths)",
+    )


 class SkillResponse(BaseModel):

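For orientation, a minimal sketch of how the new optional `location` field might be populated when a triggered skill is recorded. The import path is inferred from the file list above and the values are illustrative, not taken from the SDK:

```python
# Hypothetical usage of the new SkillKnowledge.location field (1.8.0).
# Import path assumed from openhands/sdk/context/skills/types.py above.
from openhands.sdk.context.skills.types import SkillKnowledge

knowledge = SkillKnowledge(
    name="pdf-tools",
    trigger="pdf",
    content="Use scripts/extract.py to pull text out of PDF files.",
    # New optional field: where SKILL.md lives, so relative resource paths
    # (scripts/, references/, assets/) can be resolved against it.
    location="/workspace/.openhands/skills/pdf-tools/SKILL.md",
)
```
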
openhands/sdk/context/skills/utils.py

@@ -0,0 +1,442 @@
+"""Utility functions for skill loading and management."""
+
+from __future__ import annotations
+
+import json
+import os
+import re
+import shutil
+import subprocess
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from fastmcp.mcp_config import MCPConfig
+
+from openhands.sdk.context.skills.exceptions import SkillValidationError
+from openhands.sdk.logger import get_logger
+
+
+if TYPE_CHECKING:
+    from openhands.sdk.context.skills.skill import Skill, SkillResources
+
+logger = get_logger(__name__)
+
+# Standard resource directory names per AgentSkills spec
+RESOURCE_DIRECTORIES = ("scripts", "references", "assets")
+
+# Regex pattern for valid AgentSkills names
+# - 1-64 characters
+# - Lowercase alphanumeric + hyphens only (a-z, 0-9, -)
+# - Must not start or end with hyphen
+# - Must not contain consecutive hyphens (--)
+SKILL_NAME_PATTERN = re.compile(r"^[a-z0-9]+(-[a-z0-9]+)*$")
+
+
+def find_skill_md(skill_dir: Path) -> Path | None:
+    """Find SKILL.md file in a directory (case-insensitive).
+
+    Args:
+        skill_dir: Path to the skill directory to search.
+
+    Returns:
+        Path to SKILL.md if found, None otherwise.
+    """
+    if not skill_dir.is_dir():
+        return None
+    for item in skill_dir.iterdir():
+        if item.is_file() and item.name.lower() == "skill.md":
+            return item
+    return None
+
+
+def find_mcp_config(skill_dir: Path) -> Path | None:
+    """Find .mcp.json file in a skill directory.
+
+    Args:
+        skill_dir: Path to the skill directory to search.
+
+    Returns:
+        Path to .mcp.json if found, None otherwise.
+    """
+    if not skill_dir.is_dir():
+        return None
+    mcp_json = skill_dir / ".mcp.json"
+    if mcp_json.exists() and mcp_json.is_file():
+        return mcp_json
+    return None
+
+
+def expand_mcp_variables(
+    config: dict,
+    variables: dict[str, str],
+) -> dict:
+    """Expand variables in MCP configuration.
+
+    Supports variable expansion similar to Claude Code:
+    - ${VAR} - Environment variables or provided variables
+    - ${VAR:-default} - With default value
+
+    Args:
+        config: MCP configuration dictionary.
+        variables: Dictionary of variable names to values.
+
+    Returns:
+        Configuration with variables expanded.
+    """
+    # Convert to JSON string for easy replacement
+    config_str = json.dumps(config)
+
+    # Pattern for ${VAR} or ${VAR:-default}
+    var_pattern = re.compile(r"\$\{([a-zA-Z_][a-zA-Z0-9_]*)(?::-([^}]*))?\}")
+
+    def replace_var(match: re.Match) -> str:
+        var_name = match.group(1)
+        default_value = match.group(2)
+
+        # Check provided variables first, then environment
+        if var_name in variables:
+            return variables[var_name]
+        if var_name in os.environ:
+            return os.environ[var_name]
+        if default_value is not None:
+            return default_value
+        # Return original if not found
+        return match.group(0)
+
+    config_str = var_pattern.sub(replace_var, config_str)
+    return json.loads(config_str)
+
+
+def load_mcp_config(
+    mcp_json_path: Path,
+    skill_root: Path | None = None,
+) -> dict:
+    """Load and parse .mcp.json with variable expansion.
+
+    Args:
+        mcp_json_path: Path to the .mcp.json file.
+        skill_root: Root directory of the skill (for ${SKILL_ROOT} expansion).
+
+    Returns:
+        Parsed MCP configuration dictionary.
+
+    Raises:
+        SkillValidationError: If the file cannot be parsed or is invalid.
+    """
+    try:
+        with open(mcp_json_path) as f:
+            config = json.load(f)
+    except json.JSONDecodeError as e:
+        raise SkillValidationError(f"Invalid JSON in {mcp_json_path}: {e}") from e
+    except OSError as e:
+        raise SkillValidationError(f"Cannot read {mcp_json_path}: {e}") from e
+
+    if not isinstance(config, dict):
+        raise SkillValidationError(
+            f"Invalid .mcp.json format: expected object, got {type(config).__name__}"
+        )
+
+    # Prepare variables for expansion
+    variables: dict[str, str] = {}
+    if skill_root:
+        variables["SKILL_ROOT"] = str(skill_root)
+
+    # Expand variables
+    config = expand_mcp_variables(config, variables)
+
+    # Validate using MCPConfig
+    try:
+        MCPConfig.model_validate(config)
+    except Exception as e:
+        raise SkillValidationError(f"Invalid MCP configuration: {e}") from e
+
+    return config
+
+
+def validate_skill_name(name: str, directory_name: str | None = None) -> list[str]:
+    """Validate skill name according to AgentSkills spec.
+
+    Args:
+        name: The skill name to validate.
+        directory_name: Optional directory name to check for match.
+
+    Returns:
+        List of validation error messages (empty if valid).
+    """
+    errors = []
+
+    if not name:
+        errors.append("Name cannot be empty")
+        return errors
+
+    if len(name) > 64:
+        errors.append(f"Name exceeds 64 characters: {len(name)}")
+
+    if not SKILL_NAME_PATTERN.match(name):
+        errors.append(
+            "Name must be lowercase alphanumeric with single hyphens "
+            "(e.g., 'my-skill', 'pdf-tools')"
+        )
+
+    if directory_name and name != directory_name:
+        errors.append(f"Name '{name}' does not match directory '{directory_name}'")
+
+    return errors
+
+
+def find_third_party_files(
+    repo_root: Path, third_party_skill_names: dict[str, str]
+) -> list[Path]:
+    """Find third-party skill files in the repository root.
+
+    Searches for files like .cursorrules, AGENTS.md, CLAUDE.md, etc.
+    with case-insensitive matching.
+
+    Args:
+        repo_root: Path to the repository root directory.
+        third_party_skill_names: Mapping of lowercase filenames to skill names.
+
+    Returns:
+        List of paths to third-party skill files found.
+    """
+    if not repo_root.exists():
+        return []
+
+    # Build a set of target filenames (lowercase) for case-insensitive matching
+    target_names = {name.lower() for name in third_party_skill_names}
+
+    files: list[Path] = []
+    seen_names: set[str] = set()
+    for item in repo_root.iterdir():
+        if item.is_file() and item.name.lower() in target_names:
+            # Avoid duplicates (e.g., AGENTS.md and agents.md in same dir)
+            name_lower = item.name.lower()
+            if name_lower in seen_names:
+                logger.warning(
+                    f"Duplicate third-party skill file ignored: {item} "
+                    f"(already found a file with name '{name_lower}')"
+                )
+            else:
+                files.append(item)
+                seen_names.add(name_lower)
+    return files
+
+
+def find_skill_md_directories(skill_dir: Path) -> list[Path]:
+    """Find AgentSkills-style directories containing SKILL.md files.
+
+    Args:
+        skill_dir: Path to the skills directory.
+
+    Returns:
+        List of paths to SKILL.md files.
+    """
+    results: list[Path] = []
+    if not skill_dir.exists():
+        return results
+    for subdir in skill_dir.iterdir():
+        if subdir.is_dir():
+            skill_md = find_skill_md(subdir)
+            if skill_md:
+                results.append(skill_md)
+    return results
+
+
+def find_regular_md_files(skill_dir: Path, exclude_dirs: set[Path]) -> list[Path]:
+    """Find regular .md skill files, excluding SKILL.md and files in excluded dirs.
+
+    Args:
+        skill_dir: Path to the skills directory.
+        exclude_dirs: Set of directories to exclude (e.g., SKILL.md directories).
+
+    Returns:
+        List of paths to regular .md skill files.
+    """
+    files: list[Path] = []
+    if not skill_dir.exists():
+        return files
+    for f in skill_dir.rglob("*.md"):
+        is_readme = f.name == "README.md"
+        is_skill_md = f.name.lower() == "skill.md"
+        is_in_excluded_dir = any(f.is_relative_to(d) for d in exclude_dirs)
+        if not is_readme and not is_skill_md and not is_in_excluded_dir:
+            files.append(f)
+    return files
+
+
+def load_and_categorize(
+    path: Path,
+    skill_base_dir: Path,
+    repo_skills: dict[str, Skill],
+    knowledge_skills: dict[str, Skill],
+    agent_skills: dict[str, Skill],
+) -> None:
+    """Load a skill and categorize it.
+
+    Categorizes into repo_skills, knowledge_skills, or agent_skills.
+
+    Args:
+        path: Path to the skill file.
+        skill_base_dir: Base directory for skills (used to derive relative names).
+        repo_skills: Dictionary for skills with trigger=None (permanent context).
+        knowledge_skills: Dictionary for skills with triggers (progressive).
+        agent_skills: Dictionary for AgentSkills standard SKILL.md files.
+    """
+    # Import here to avoid circular dependency
+    from openhands.sdk.context.skills.skill import Skill
+
+    skill = Skill.load(path, skill_base_dir)
+
+    # AgentSkills (SKILL.md directories) are a separate category from OpenHands skills.
+    # They follow the AgentSkills standard and should be handled differently.
+    is_skill_md = path.name.lower() == "skill.md"
+    if is_skill_md:
+        agent_skills[skill.name] = skill
+    elif skill.trigger is None:
+        repo_skills[skill.name] = skill
+    else:
+        knowledge_skills[skill.name] = skill
+
+
+def get_skills_cache_dir() -> Path:
+    """Get the local cache directory for public skills repository.
+
+    Returns:
+        Path to the skills cache directory (~/.openhands/cache/skills).
+    """
+    cache_dir = Path.home() / ".openhands" / "cache" / "skills"
+    cache_dir.mkdir(parents=True, exist_ok=True)
+    return cache_dir
+
+
+def update_skills_repository(
+    repo_url: str,
+    branch: str,
+    cache_dir: Path,
+) -> Path | None:
+    """Clone or update the local skills repository.
+
+    Args:
+        repo_url: URL of the skills repository.
+        branch: Branch name to use.
+        cache_dir: Directory where the repository should be cached.
+
+    Returns:
+        Path to the local repository if successful, None otherwise.
+    """
+    repo_path = cache_dir / "public-skills"
+
+    try:
+        if repo_path.exists() and (repo_path / ".git").exists():
+            logger.debug(f"Updating skills repository at {repo_path}")
+            try:
+                subprocess.run(
+                    ["git", "fetch", "origin"],
+                    cwd=repo_path,
+                    check=True,
+                    capture_output=True,
+                    timeout=30,
+                )
+                subprocess.run(
+                    ["git", "reset", "--hard", f"origin/{branch}"],
+                    cwd=repo_path,
+                    check=True,
+                    capture_output=True,
+                    timeout=10,
+                )
+                logger.debug("Skills repository updated successfully")
+            except subprocess.TimeoutExpired:
+                logger.warning("Git pull timed out, using existing cached repository")
+            except subprocess.CalledProcessError as e:
+                logger.warning(
+                    f"Failed to update repository: {e.stderr.decode()}, "
+                    f"using existing cached version"
+                )
+        else:
+            logger.info(f"Cloning public skills repository from {repo_url}")
+            if repo_path.exists():
+                shutil.rmtree(repo_path)
+
+            subprocess.run(
+                [
+                    "git",
+                    "clone",
+                    "--depth",
+                    "1",
+                    "--branch",
+                    branch,
+                    repo_url,
+                    str(repo_path),
+                ],
+                check=True,
+                capture_output=True,
+                timeout=60,
+            )
+            logger.debug(f"Skills repository cloned to {repo_path}")
+
+        return repo_path
+
+    except subprocess.TimeoutExpired:
+        logger.warning(f"Git operation timed out for {repo_url}")
+        return None
+    except subprocess.CalledProcessError as e:
+        logger.warning(
+            f"Failed to clone/update repository {repo_url}: {e.stderr.decode()}"
+        )
+        return None
+    except Exception as e:
+        logger.warning(f"Error managing skills repository: {str(e)}")
+        return None
+
+
+def discover_skill_resources(skill_dir: Path) -> SkillResources:
+    """Discover resource directories in a skill directory.
+
+    Scans for standard AgentSkills resource directories:
+    - scripts/: Executable scripts
+    - references/: Reference documentation
+    - assets/: Static assets
+
+    Args:
+        skill_dir: Path to the skill directory.
+
+    Returns:
+        SkillResources with lists of files in each resource directory.
+    """
+    # Import here to avoid circular dependency
+    from openhands.sdk.context.skills.skill import SkillResources
+
+    resources = SkillResources(skill_root=str(skill_dir.resolve()))
+
+    for resource_type in RESOURCE_DIRECTORIES:
+        resource_dir = skill_dir / resource_type
+        if resource_dir.is_dir():
+            files = _list_resource_files(resource_dir, resource_type)
+            setattr(resources, resource_type, files)
+
+    return resources
+
+
+def _list_resource_files(
+    resource_dir: Path,
+    resource_type: str,
+) -> list[str]:
+    """List files in a resource directory.
+
+    Args:
+        resource_dir: Path to the resource directory.
+        resource_type: Type of resource (scripts, references, assets).
+
+    Returns:
+        List of relative file paths within the resource directory.
+    """
+    files: list[str] = []
+    try:
+        for item in resource_dir.rglob("*"):
+            if item.is_file():
+                # Store relative path from resource directory
+                rel_path = item.relative_to(resource_dir)
+                files.append(str(rel_path))
+    except OSError as e:
+        logger.warning(f"Error listing {resource_type} directory: {e}")
+    return sorted(files)

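The two most self-contained helpers in the new module are `expand_mcp_variables` and `validate_skill_name`; their full source appears above, so the behavior below follows directly from it. Only the import path is assumed from the module's location in the wheel:

```python
# Sketch of the ${VAR} / ${VAR:-default} expansion and skill-name validation
# shown above; import path assumed from openhands/sdk/context/skills/utils.py.
import os

from openhands.sdk.context.skills.utils import (
    expand_mcp_variables,
    validate_skill_name,
)

config = {
    "mcpServers": {
        "docs": {
            "command": "${SKILL_ROOT}/scripts/server.sh",
            "env": {"API_KEY": "${MY_API_KEY:-dev-key}"},
        }
    }
}

os.environ.pop("MY_API_KEY", None)
expanded = expand_mcp_variables(config, {"SKILL_ROOT": "/skills/docs"})
# ${SKILL_ROOT} comes from the provided variables; ${MY_API_KEY:-dev-key}
# falls back to its default because the environment variable is unset.
assert expanded["mcpServers"]["docs"]["command"] == "/skills/docs/scripts/server.sh"
assert expanded["mcpServers"]["docs"]["env"]["API_KEY"] == "dev-key"

# Name validation returns a list of human-readable errors (empty means valid).
assert validate_skill_name("pdf-tools") == []
assert validate_skill_name("PDF_Tools")  # non-empty: uppercase/underscore rejected
```
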
openhands/sdk/conversation/impl/local_conversation.py

@@ -140,19 +140,7 @@ class LocalConversation(BaseConversation):
         def _default_callback(e):
             self._state.events.append(e)

-        self._hook_processor = None
-        hook_callback = None
-        if hook_config is not None:
-            self._hook_processor, hook_callback = create_hook_callback(
-                hook_config=hook_config,
-                working_dir=str(self.workspace.working_dir),
-                session_id=str(desired_id),
-            )
-
         callback_list = list(callbacks) if callbacks else []
-        if hook_callback is not None:
-            callback_list.insert(0, hook_callback)
-
         composed_list = callback_list + [_default_callback]
         # Handle visualization configuration
         if isinstance(visualizer, ConversationVisualizerBase):

@@ -175,7 +163,20 @@ class LocalConversation(BaseConversation):
             # No visualization (visualizer is None)
             self._visualizer = None

-
+        # Compose the base callback chain (visualizer -> user callbacks -> default)
+        base_callback = BaseConversation.compose_callbacks(composed_list)
+
+        # If hooks configured, wrap with hook processor that forwards to base chain
+        self._hook_processor = None
+        if hook_config is not None:
+            self._hook_processor, self._on_event = create_hook_callback(
+                hook_config=hook_config,
+                working_dir=str(self.workspace.working_dir),
+                session_id=str(desired_id),
+                original_callback=base_callback,
+            )
+        else:
+            self._on_event = base_callback
         self._on_token = (
             BaseConversation.compose_callbacks(token_callbacks)
             if token_callbacks

@@ -335,12 +336,39 @@ class LocalConversation(BaseConversation):
                 # Before value can be modified step can be taken
                 # Ensure step conditions are checked when lock is already acquired
                 if self._state.execution_status in [
-                    ConversationExecutionStatus.FINISHED,
                     ConversationExecutionStatus.PAUSED,
                     ConversationExecutionStatus.STUCK,
                 ]:
                     break

+                # Handle stop hooks on FINISHED
+                if (
+                    self._state.execution_status
+                    == ConversationExecutionStatus.FINISHED
+                ):
+                    if self._hook_processor is not None:
+                        should_stop, feedback = self._hook_processor.run_stop(
+                            reason="agent_finished"
+                        )
+                        if not should_stop:
+                            logger.info("Stop hook denied agent stopping")
+                            if feedback:
+                                prefixed = f"[Stop hook feedback] {feedback}"
+                                feedback_msg = MessageEvent(
+                                    source="user",
+                                    llm_message=Message(
+                                        role="user",
+                                        content=[TextContent(text=prefixed)],
+                                    ),
+                                )
+                                self._on_event(feedback_msg)
+                            self._state.execution_status = (
+                                ConversationExecutionStatus.RUNNING
+                            )
+                            continue
+                    # No hooks or hooks allowed stopping
+                    break
+
                 # Check for stuck patterns if enabled
                 if self._stuck_detector:
                     is_stuck = self._stuck_detector.is_stuck()

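Taken together, these hunks move hook handling from "insert a hook callback at the front of the callback list" to "compose the full chain first, then wrap it", so the hook processor can forward events to everything downstream; a stop hook can now veto FINISHED, replay its feedback as a user message through `self._on_event`, and flip the status back to RUNNING. A schematic sketch of the wrapping pattern only, not the SDK's actual `create_hook_callback`, whose body is not part of this diff:

```python
from typing import Any, Callable

EventCallback = Callable[[Any], None]


def wrap_with_hooks(
    original_callback: EventCallback,
    hook_side_effect: EventCallback,
) -> EventCallback:
    """Run hook processing first, then forward the event to the composed chain."""

    def wrapped(event: Any) -> None:
        hook_side_effect(event)    # hook-side processing (policy checks, logging, ...)
        original_callback(event)   # visualizer -> user callbacks -> default persistence

    return wrapped
```
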
openhands/sdk/conversation/state.py

@@ -5,7 +5,7 @@ from enum import Enum
 from pathlib import Path
 from typing import Any, Self

-from pydantic import
+from pydantic import Field, PrivateAttr, model_validator

 from openhands.sdk.agent.base import AgentBase
 from openhands.sdk.conversation.conversation_stats import ConversationStats

@@ -60,7 +60,10 @@ class ConversationState(OpenHandsModel):
     )
     workspace: BaseWorkspace = Field(
         ...,
-        description=
+        description=(
+            "Workspace used by the agent to execute commands and read/write files. "
+            "Not the process working directory."
+        ),
     )
     persistence_dir: str | None = Field(
         default="workspace/conversations",

@@ -116,8 +119,6 @@ class ConversationState(OpenHandsModel):
     secret_registry: SecretRegistry = Field(
         default_factory=SecretRegistry,
         description="Registry for handling secrets and sensitive data",
-        validation_alias=AliasChoices("secret_registry", "secrets_manager"),
-        serialization_alias="secret_registry",
     )

     # ===== Private attrs (NOT Fields) =====

@@ -133,6 +134,14 @@ class ConversationState(OpenHandsModel):
         default_factory=FIFOLock
     )  # FIFO lock for thread safety

+    @model_validator(mode="before")
+    @classmethod
+    def _handle_secrets_manager_alias(cls, data: Any) -> Any:
+        """Handle legacy 'secrets_manager' field name for backward compatibility."""
+        if isinstance(data, dict) and "secrets_manager" in data:
+            data["secret_registry"] = data.pop("secrets_manager")
+        return data
+
     @property
     def events(self) -> EventLog:
         return self._events

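The removed `AliasChoices`/`serialization_alias` pair is replaced by this explicit before-mode validator, which keeps old persisted payloads loadable while always serializing as `secret_registry`. A standalone Pydantic sketch of the same migration pattern (a toy model, not the real `ConversationState`):

```python
from typing import Any

from pydantic import BaseModel, model_validator


class StateLike(BaseModel):
    secret_registry: dict = {}

    @model_validator(mode="before")
    @classmethod
    def _handle_secrets_manager_alias(cls, data: Any) -> Any:
        # Accept payloads persisted by older versions that used "secrets_manager".
        if isinstance(data, dict) and "secrets_manager" in data:
            data["secret_registry"] = data.pop("secrets_manager")
        return data


old_payload = {"secrets_manager": {"provider": "env"}}
assert StateLike.model_validate(old_payload).secret_registry == {"provider": "env"}
```
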
openhands/sdk/conversation/state.py (continued)

@@ -172,10 +181,35 @@
         max_iterations: int = 500,
         stuck_detection: bool = True,
     ) -> "ConversationState":
-        """
-
-
-
+        """Create a new conversation state or resume from persistence.
+
+        This factory method handles both new conversation creation and resumption
+        from persisted state.
+
+        **New conversation:**
+        The provided Agent is used directly. Pydantic validation happens via the
+        cls() constructor.
+
+        **Restored conversation:**
+        The provided Agent is validated against the persisted agent using
+        agent.load(). Tools must match (they may have been used in conversation
+        history), but all other configuration can be freely changed: LLM,
+        agent_context, condenser, system prompts, etc.
+
+        Args:
+            id: Unique conversation identifier
+            agent: The Agent to use (tools must match persisted on restore)
+            workspace: Working directory for agent operations
+            persistence_dir: Directory for persisting state and events
+            max_iterations: Maximum iterations per run
+            stuck_detection: Whether to enable stuck detection
+
+        Returns:
+            ConversationState ready for use
+
+        Raises:
+            ValueError: If conversation ID or tools mismatch on restore
+            ValidationError: If agent or other fields fail Pydantic validation
         """
         file_store = (
             LocalFileStore(persistence_dir, cache_limit_size=max_iterations)

@@ -192,28 +226,28 @@
         if base_text:
             state = cls.model_validate(json.loads(base_text))

-            #
+            # Restore the conversation with the same id
             if state.id != id:
                 raise ValueError(
                     f"Conversation ID mismatch: provided {id}, "
                     f"but persisted state has {state.id}"
                 )

-            # Attach event log early so we can read history
+            # Attach event log early so we can read history for tool verification
             state._fs = file_store
             state._events = EventLog(file_store, dir_path=EVENTS_DIR)

-            #
-
-            resolved = agent.resolve_diff_from_deserialized(
-                state.agent, events=state._events
-            )
+            # Verify compatibility (agent class + tools)
+            agent.verify(state.agent, events=state._events)

-            # Commit
+            # Commit runtime-provided values (may autosave)
             state._autosave_enabled = True
-            state.agent =
+            state.agent = agent
+            state.workspace = workspace
+            state.max_iterations = max_iterations

-
+            # Note: stats are already deserialized from base_state.json above.
+            # Do NOT reset stats here - this would lose accumulated metrics.

             logger.info(
                 f"Resumed conversation {state.id} from persistent storage.\n"

@@ -236,8 +270,6 @@ class ConversationState(OpenHandsModel):
             max_iterations=max_iterations,
             stuck_detection=stuck_detection,
         )
-        # Record existing analyzer configuration in state
-        state.security_analyzer = state.security_analyzer
         state._fs = file_store
         state._events = EventLog(file_store, dir_path=EVENTS_DIR)
         state.stats = ConversationStats()

openhands/sdk/event/llm_convertible/action.py

@@ -65,6 +65,20 @@ class ActionEvent(LLMConvertibleEvent):
         description="The LLM's assessment of the safety risk of this action.",
     )

+    summary: str | None = Field(
+        default=None,
+        description=(
+            "A concise summary (approximately 10 words) of what this action does, "
+            "provided by the LLM for explainability and debugging. "
+            "Examples of good summaries: "
+            "'editing configuration file for deployment settings' | "
+            "'searching codebase for authentication function definitions' | "
+            "'installing required dependencies from package manifest' | "
+            "'running tests to verify bug fix' | "
+            "'viewing directory structure to locate source files'"
+        ),
+    )
+
     @property
     def visualize(self) -> Text:
         """Return Rich Text representation of this action event."""

@@ -73,6 +87,12 @@ class ActionEvent(LLMConvertibleEvent):
         if self.security_risk != risk.SecurityRisk.UNKNOWN:
             content.append(self.security_risk.visualize)

+        # Display summary if available
+        if self.summary:
+            content.append("Summary: ", style="bold cyan")
+            content.append(self.summary)
+            content.append("\n\n")
+
         # Display reasoning content first if available
         if self.reasoning_content:
             content.append("Reasoning:\n", style="bold")