openhands-sdk 1.8.1__py3-none-any.whl → 1.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. openhands/sdk/agent/agent.py +64 -0
  2. openhands/sdk/agent/base.py +29 -10
  3. openhands/sdk/agent/prompts/system_prompt.j2 +1 -0
  4. openhands/sdk/context/condenser/llm_summarizing_condenser.py +7 -5
  5. openhands/sdk/context/skills/skill.py +59 -1
  6. openhands/sdk/context/skills/utils.py +6 -65
  7. openhands/sdk/context/view.py +6 -11
  8. openhands/sdk/conversation/base.py +5 -0
  9. openhands/sdk/conversation/event_store.py +84 -12
  10. openhands/sdk/conversation/impl/local_conversation.py +7 -0
  11. openhands/sdk/conversation/impl/remote_conversation.py +16 -3
  12. openhands/sdk/conversation/state.py +25 -2
  13. openhands/sdk/conversation/visualizer/base.py +23 -0
  14. openhands/sdk/critic/__init__.py +4 -1
  15. openhands/sdk/critic/base.py +17 -20
  16. openhands/sdk/critic/impl/__init__.py +2 -0
  17. openhands/sdk/critic/impl/agent_finished.py +9 -5
  18. openhands/sdk/critic/impl/api/__init__.py +18 -0
  19. openhands/sdk/critic/impl/api/chat_template.py +232 -0
  20. openhands/sdk/critic/impl/api/client.py +313 -0
  21. openhands/sdk/critic/impl/api/critic.py +90 -0
  22. openhands/sdk/critic/impl/api/taxonomy.py +180 -0
  23. openhands/sdk/critic/result.py +148 -0
  24. openhands/sdk/event/conversation_error.py +12 -0
  25. openhands/sdk/event/llm_convertible/action.py +10 -0
  26. openhands/sdk/event/llm_convertible/message.py +10 -0
  27. openhands/sdk/git/cached_repo.py +459 -0
  28. openhands/sdk/git/utils.py +118 -3
  29. openhands/sdk/hooks/__init__.py +7 -1
  30. openhands/sdk/hooks/config.py +154 -45
  31. openhands/sdk/io/base.py +52 -0
  32. openhands/sdk/io/local.py +25 -0
  33. openhands/sdk/io/memory.py +34 -1
  34. openhands/sdk/llm/llm.py +6 -2
  35. openhands/sdk/llm/utils/model_features.py +3 -0
  36. openhands/sdk/llm/utils/telemetry.py +41 -2
  37. openhands/sdk/plugin/__init__.py +17 -0
  38. openhands/sdk/plugin/fetch.py +231 -0
  39. openhands/sdk/plugin/plugin.py +61 -4
  40. openhands/sdk/plugin/types.py +394 -1
  41. openhands/sdk/secret/secrets.py +19 -4
  42. {openhands_sdk-1.8.1.dist-info → openhands_sdk-1.9.0.dist-info}/METADATA +6 -1
  43. {openhands_sdk-1.8.1.dist-info → openhands_sdk-1.9.0.dist-info}/RECORD +45 -37
  44. {openhands_sdk-1.8.1.dist-info → openhands_sdk-1.9.0.dist-info}/WHEEL +1 -1
  45. {openhands_sdk-1.8.1.dist-info → openhands_sdk-1.9.0.dist-info}/top_level.txt +0 -0
openhands/sdk/git/utils.py CHANGED
@@ -1,4 +1,5 @@
  import logging
+ import re
  import shlex
  import subprocess
  from pathlib import Path
@@ -13,12 +14,17 @@ logger = logging.getLogger(__name__)
  GIT_EMPTY_TREE_HASH = "4b825dc642cb6eb9a060e54bf8d69288fbee4904"


- def run_git_command(args: list[str], cwd: str | Path) -> str:
+ def run_git_command(
+     args: list[str],
+     cwd: str | Path | None = None,
+     timeout: int = 30,
+ ) -> str:
      """Run a git command safely without shell injection vulnerabilities.

      Args:
          args: List of command arguments (e.g., ['git', 'status', '--porcelain'])
-         cwd: Working directory to run the command in
+         cwd: Working directory to run the command in (optional for commands like clone)
+         timeout: Timeout in seconds (default: 30)

      Returns:
          Command output as string
@@ -33,7 +39,7 @@ def run_git_command(args: list[str], cwd: str | Path) -> str:
          capture_output=True,
          text=True,
          check=False,
-         timeout=30,  # Prevent hanging commands
+         timeout=timeout,
      )

      if result.returncode != 0:
@@ -212,3 +218,112 @@ def validate_git_repository(repo_dir: str | Path) -> Path:
          raise GitRepositoryError(f"Not a git repository: {repo_path}") from e

      return repo_path
+
+
+ # ============================================================================
+ # Git URL utilities
+ # ============================================================================
+
+
+ def is_git_url(source: str) -> bool:
+     """Check if a source string looks like a git URL.
+
+     Detects git URLs by their protocol/scheme rather than enumerating providers.
+     This handles any git hosting service (GitHub, GitLab, Codeberg, self-hosted, etc.)
+
+     Args:
+         source: String to check.
+
+     Returns:
+         True if the string appears to be a git URL, False otherwise.
+
+     Examples:
+         >>> is_git_url("https://github.com/owner/repo.git")
+         True
+         >>> is_git_url("git@github.com:owner/repo.git")
+         True
+         >>> is_git_url("/local/path")
+         False
+     """
+     # HTTPS/HTTP URLs to git repositories
+     if source.startswith(("https://", "http://")):
+         return True
+
+     # SSH format: git@host:path or user@host:path
+     if re.match(r"^[\w.-]+@[\w.-]+:", source):
+         return True
+
+     # Git protocol
+     if source.startswith("git://"):
+         return True
+
+     # File protocol (for testing)
+     if source.startswith("file://"):
+         return True
+
+     return False
+
+
+ def normalize_git_url(url: str) -> str:
+     """Normalize a git URL by ensuring .git suffix for HTTPS URLs.
+
+     Args:
+         url: Git URL to normalize.
+
+     Returns:
+         Normalized URL with .git suffix for HTTPS/HTTP URLs.
+
+     Examples:
+         >>> normalize_git_url("https://github.com/owner/repo")
+         "https://github.com/owner/repo.git"
+         >>> normalize_git_url("https://github.com/owner/repo.git")
+         "https://github.com/owner/repo.git"
+         >>> normalize_git_url("git@github.com:owner/repo.git")
+         "git@github.com:owner/repo.git"
+     """
+     if url.startswith(("https://", "http://")) and not url.endswith(".git"):
+         url = url.rstrip("/")
+         url = f"{url}.git"
+     return url
+
+
+ def extract_repo_name(source: str) -> str:
+     """Extract a human-readable repository name from a git URL or path.
+
+     Extracts the last path component (repo name) and sanitizes it for use
+     in directory names or display purposes.
+
+     Args:
+         source: Git URL or local path string.
+
+     Returns:
+         A sanitized name suitable for use in directory names (max 32 chars).
+
+     Examples:
+         >>> extract_repo_name("https://github.com/owner/my-repo.git")
+         "my-repo"
+         >>> extract_repo_name("git@github.com:owner/my-repo.git")
+         "my-repo"
+         >>> extract_repo_name("/path/to/local-repo")
+         "local-repo"
+     """
+     # Strip common prefixes to get to the path portion
+     name = source
+     for prefix in ("github:", "https://", "http://", "git://", "file://"):
+         if name.startswith(prefix):
+             name = name[len(prefix) :]
+             break
+
+     # Handle SSH format: user@host:path -> path
+     if "@" in name and ":" in name and "/" not in name.split(":")[0]:
+         name = name.split(":", 1)[1]
+
+     # Remove .git suffix and get last path component
+     name = name.rstrip("/").removesuffix(".git")
+     name = name.rsplit("/", 1)[-1]
+
+     # Sanitize: keep alphanumeric, dash, underscore only
+     name = re.sub(r"[^a-zA-Z0-9_-]", "-", name)
+     name = re.sub(r"-+", "-", name).strip("-")
+
+     return name[:32] if name else "repo"
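For orientation, a short usage sketch of the helpers added above. The import path follows the module shown in the file list; the clone destination and depth flag are illustrative, and run_git_command expects the full argument list including "git" (per its docstring example).

    from openhands.sdk.git.utils import (
        extract_repo_name,
        is_git_url,
        normalize_git_url,
        run_git_command,
    )

    source = "https://github.com/owner/my-repo"
    if is_git_url(source):
        url = normalize_git_url(source)    # "https://github.com/owner/my-repo.git"
        name = extract_repo_name(url)      # "my-repo"
        # cwd is now optional and the timeout is configurable, e.g. for a clone:
        run_git_command(["git", "clone", "--depth", "1", url, f"/tmp/{name}"], timeout=300)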
openhands/sdk/hooks/__init__.py CHANGED
@@ -5,7 +5,12 @@ Hooks are event-driven scripts that execute at specific lifecycle events
  during agent execution, enabling deterministic control over agent behavior.
  """

- from openhands.sdk.hooks.config import HookConfig, HookDefinition, HookMatcher
+ from openhands.sdk.hooks.config import (
+     HookConfig,
+     HookDefinition,
+     HookMatcher,
+     HookType,
+ )
  from openhands.sdk.hooks.conversation_hooks import (
      HookEventProcessor,
      create_hook_callback,
@@ -19,6 +24,7 @@ __all__ = [
      "HookConfig",
      "HookDefinition",
      "HookMatcher",
+     "HookType",
      "HookExecutor",
      "HookResult",
      "HookManager",
openhands/sdk/hooks/config.py CHANGED
@@ -7,7 +7,7 @@ from enum import Enum
  from pathlib import Path
  from typing import Any

- from pydantic import BaseModel, Field
+ from pydantic import BaseModel, Field, model_validator

  from openhands.sdk.hooks.types import HookEventType

@@ -15,6 +15,26 @@ from openhands.sdk.hooks.types import HookEventType
  logger = logging.getLogger(__name__)


+ def _pascal_to_snake(name: str) -> str:
+     """Convert PascalCase to snake_case."""
+     # Insert underscore before uppercase letters and lowercase everything
+     result = re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()
+     return result
+
+
+ # Valid snake_case field names for hook events
+ _VALID_HOOK_FIELDS: frozenset[str] = frozenset(
+     {
+         "pre_tool_use",
+         "post_tool_use",
+         "user_prompt_submit",
+         "session_start",
+         "session_end",
+         "stop",
+     }
+ )
+
+
  class HookType(str, Enum):
      """Types of hooks that can be executed."""

@@ -77,9 +97,117 @@ class HookMatcher(BaseModel):


  class HookConfig(BaseModel):
-     """Configuration for all hooks, loaded from .openhands/hooks.json."""
+     """Configuration for all hooks.
+
+     Hooks can be configured either by loading from `.openhands/hooks.json` or
+     by directly instantiating with typed fields:
+
+         # Direct instantiation with typed fields (recommended):
+         config = HookConfig(
+             pre_tool_use=[
+                 HookMatcher(
+                     matcher="terminal",
+                     hooks=[HookDefinition(command="block_dangerous.sh")]
+                 )
+             ]
+         )
+
+         # Load from JSON file:
+         config = HookConfig.load(".openhands/hooks.json")
+     """
+
+     model_config = {
+         "extra": "forbid",
+     }
+
+     pre_tool_use: list[HookMatcher] = Field(
+         default_factory=list,
+         description="Hooks that run before tool execution",
+     )
+     post_tool_use: list[HookMatcher] = Field(
+         default_factory=list,
+         description="Hooks that run after tool execution",
+     )
+     user_prompt_submit: list[HookMatcher] = Field(
+         default_factory=list,
+         description="Hooks that run when user submits a prompt",
+     )
+     session_start: list[HookMatcher] = Field(
+         default_factory=list,
+         description="Hooks that run when a session starts",
+     )
+     session_end: list[HookMatcher] = Field(
+         default_factory=list,
+         description="Hooks that run when a session ends",
+     )
+     stop: list[HookMatcher] = Field(
+         default_factory=list,
+         description="Hooks that run when the agent attempts to stop",
+     )
+
+     def is_empty(self) -> bool:
+         """Check if this config has no hooks configured."""
+         return not any(
+             [
+                 self.pre_tool_use,
+                 self.post_tool_use,
+                 self.user_prompt_submit,
+                 self.session_start,
+                 self.session_end,
+                 self.stop,
+             ]
+         )
+
+     @model_validator(mode="before")
+     @classmethod
+     def _normalize_hooks_input(cls, data: Any) -> Any:
+         """Support JSON format with PascalCase keys and 'hooks' wrapper.
+
+         We intentionally continue supporting these formats for interoperability with
+         existing integrations (e.g. Claude Code plugin hook files).
+         """
+         if not isinstance(data, dict):
+             return data
+
+         # Unwrap legacy format: {"hooks": {"PreToolUse": [...]}}
+         if "hooks" in data:
+             if len(data) != 1:
+                 logger.warning(
+                     'HookConfig legacy wrapper format should be {"hooks": {...}}. '
+                     "Extra top-level keys will be ignored."
+                 )
+             data = data["hooks"]
+
+         # Convert PascalCase keys to snake_case field names
+         normalized: dict[str, Any] = {}
+         seen_fields: set[str] = set()
+
+         for key, value in data.items():
+             snake_key = _pascal_to_snake(key)
+             is_pascal_case = snake_key != key
+
+             if is_pascal_case:
+                 # Validate that PascalCase key maps to a known field
+                 if snake_key not in _VALID_HOOK_FIELDS:
+                     valid_types = ", ".join(sorted(_VALID_HOOK_FIELDS))
+                     raise ValueError(
+                         f"Unknown event type '{key}'. Valid types: {valid_types}"
+                     )
+
+             # Check for duplicate keys (both PascalCase and snake_case provided)
+             if snake_key in seen_fields:
+                 raise ValueError(
+                     f"Duplicate hook event: both '{key}' and its snake_case "
+                     f"equivalent '{snake_key}' were provided"
+                 )
+             seen_fields.add(snake_key)
+             normalized[snake_key] = value
+
+         # Preserve backwards compatibility without deprecating any supported formats.
+         # The legacy 'hooks' wrapper and PascalCase keys are accepted for
+         # interoperability and should not emit a deprecation warning.

-     hooks: dict[str, list[HookMatcher]] = Field(default_factory=dict)
+         return normalized

      @classmethod
      def load(
@@ -111,49 +239,34 @@ class HookConfig(BaseModel):
          if not path.exists():
              return cls()

-         try:
-             with open(path) as f:
-                 data = json.load(f)
-             return cls.from_dict(data)
-         except (json.JSONDecodeError, OSError) as e:
-             # Log warning but don't fail - just return empty config
-             logger.warning(f"Failed to load hooks from {path}: {e}")
-             return cls()
+         with open(path) as f:
+             data = json.load(f)
+         # Use model_validate which triggers the model_validator
+         return cls.model_validate(data)

      @classmethod
      def from_dict(cls, data: dict[str, Any]) -> "HookConfig":
-         """Create HookConfig from a dictionary."""
-         hooks_data = data.get("hooks", {})
-         hooks: dict[str, list[HookMatcher]] = {}
-
-         for event_type, matchers in hooks_data.items():
-             if not isinstance(matchers, list):
-                 continue
-
-             hooks[event_type] = []
-             for matcher_data in matchers:
-                 if isinstance(matcher_data, dict):
-                     # Parse hooks within the matcher
-                     hook_defs = []
-                     for hook_data in matcher_data.get("hooks", []):
-                         if isinstance(hook_data, dict):
-                             hook_defs.append(HookDefinition(**hook_data))
-
-                     hooks[event_type].append(
-                         HookMatcher(
-                             matcher=matcher_data.get("matcher", "*"),
-                             hooks=hook_defs,
-                         )
-                     )
+         """Create HookConfig from a dictionary.
+
+         Supports both legacy format with "hooks" wrapper and direct format:
+             # Legacy format:
+             {"hooks": {"PreToolUse": [...]}}

-         return cls(hooks=hooks)
+             # Direct format:
+             {"PreToolUse": [...]}
+         """
+         return cls.model_validate(data)
+
+     def _get_matchers_for_event(self, event_type: HookEventType) -> list[HookMatcher]:
+         """Get matchers for an event type."""
+         field_name = _pascal_to_snake(event_type.value)
+         return getattr(self, field_name, [])

      def get_hooks_for_event(
          self, event_type: HookEventType, tool_name: str | None = None
      ) -> list[HookDefinition]:
          """Get all hooks that should run for an event."""
-         event_key = event_type.value
-         matchers = self.hooks.get(event_key, [])
+         matchers = self._get_matchers_for_event(event_type)

          result: list[HookDefinition] = []
          for matcher in matchers:
@@ -164,17 +277,13 @@ class HookConfig(BaseModel):

      def has_hooks_for_event(self, event_type: HookEventType) -> bool:
          """Check if there are any hooks configured for an event type."""
-         return event_type.value in self.hooks and len(self.hooks[event_type.value]) > 0
-
-     def to_dict(self) -> dict[str, Any]:
-         """Convert to dictionary format for serialization."""
-         hooks_dict = {k: [m.model_dump() for m in v] for k, v in self.hooks.items()}
-         return {"hooks": hooks_dict}
+         matchers = self._get_matchers_for_event(event_type)
+         return len(matchers) > 0

      def save(self, path: str | Path) -> None:
-         """Save hook configuration to a JSON file."""
+         """Save hook configuration to a JSON file using snake_case field names."""
          path = Path(path)
          path.parent.mkdir(parents=True, exist_ok=True)

          with open(path, "w") as f:
-             json.dump(self.to_dict(), f, indent=2)
+             json.dump(self.model_dump(mode="json", exclude_defaults=True), f, indent=2)
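A minimal sketch of the reworked hook configuration API, adapted from the docstring and methods above; HookMatcher/HookDefinition fields beyond matcher, hooks, and command are not shown in this diff and are assumed to keep their defaults.

    from openhands.sdk.hooks import HookConfig, HookDefinition, HookMatcher

    # Typed fields replace the former hooks: dict[str, list[HookMatcher]] attribute.
    config = HookConfig(
        pre_tool_use=[
            HookMatcher(
                matcher="terminal",
                hooks=[HookDefinition(command="block_dangerous.sh")],
            )
        ]
    )
    assert not config.is_empty()

    # save() now writes snake_case field names; load()/from_dict() still accept
    # the legacy {"hooks": {...}} wrapper and PascalCase event keys.
    config.save(".openhands/hooks.json")
    config = HookConfig.load(".openhands/hooks.json")
    legacy = HookConfig.from_dict({"hooks": {"PreToolUse": [{"matcher": "*", "hooks": []}]}})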
openhands/sdk/io/base.py CHANGED
@@ -1,4 +1,6 @@
  from abc import ABC, abstractmethod
+ from collections.abc import Iterator
+ from contextlib import contextmanager


  class FileStore(ABC):
@@ -6,6 +8,9 @@ class FileStore(ABC):

      This class defines the interface for file storage backends that can
      handle basic file operations like reading, writing, listing, and deleting files.
+
+     Implementations should provide a locking mechanism via the `lock()` context
+     manager for thread/process-safe operations.
      """

      @abstractmethod
@@ -46,3 +51,50 @@ class FileStore(ABC):
          Args:
              path: The file or directory path to delete.
          """
+
+     @abstractmethod
+     def exists(self, path: str) -> bool:
+         """Check if a file or directory exists at the specified path.
+
+         Args:
+             path: The file or directory path to check.
+
+         Returns:
+             True if the path exists, False otherwise.
+         """
+
+     @abstractmethod
+     def get_absolute_path(self, path: str) -> str:
+         """Get the absolute filesystem path for a given relative path.
+
+         Args:
+             path: The relative path within the file store.
+
+         Returns:
+             The absolute path on the filesystem.
+         """
+
+     @abstractmethod
+     @contextmanager
+     def lock(self, path: str, timeout: float = 30.0) -> Iterator[None]:
+         """Acquire an exclusive lock for the given path.
+
+         This context manager provides thread and process-safe locking.
+         Implementations may use file-based locking, threading locks, or
+         other mechanisms as appropriate.
+
+         Args:
+             path: The path to lock (used to identify the lock).
+             timeout: Maximum seconds to wait for lock acquisition.
+
+         Yields:
+             None when lock is acquired.
+
+         Raises:
+             TimeoutError: If lock cannot be acquired within timeout.
+
+         Note:
+             File-based locking (flock) does NOT work reliably on NFS mounts
+             or network filesystems.
+         """
+         yield  # pragma: no cover
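A caller-side sketch of the new FileStore surface: only exists(), get_absolute_path(), and lock() come from this diff, while the helper function, path names, and lock-file convention below are hypothetical.

    from openhands.sdk.io.base import FileStore

    def ensure_state(store: FileStore, path: str = "state.json") -> str:
        # lock() raises TimeoutError if the lock cannot be acquired in time.
        with store.lock(f"{path}.lock", timeout=10.0):
            if not store.exists(path):
                ...  # initialize the file via the store's write primitive
            return store.get_absolute_path(path)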
openhands/sdk/io/local.py CHANGED
@@ -1,5 +1,9 @@
  import os
  import shutil
+ from collections.abc import Iterator
+ from contextlib import contextmanager
+
+ from filelock import FileLock, Timeout

  from openhands.sdk.io.cache import MemoryLRUCache
  from openhands.sdk.logger import get_logger
@@ -117,3 +121,24 @@ class LocalFileStore(FileStore):

          except Exception as e:
              logger.error(f"Error clearing local file store: {str(e)}")
+
+     def exists(self, path: str) -> bool:
+         """Check if a file or directory exists."""
+         return os.path.exists(self.get_full_path(path))
+
+     def get_absolute_path(self, path: str) -> str:
+         """Get absolute filesystem path."""
+         return self.get_full_path(path)
+
+     @contextmanager
+     def lock(self, path: str, timeout: float = 30.0) -> Iterator[None]:
+         """Acquire file-based lock using flock."""
+         lock_path = self.get_full_path(path)
+         os.makedirs(os.path.dirname(lock_path), exist_ok=True)
+         file_lock = FileLock(lock_path)
+         try:
+             with file_lock.acquire(timeout=timeout):
+                 yield
+         except Timeout:
+             logger.error(f"Failed to acquire lock within {timeout}s: {lock_path}")
+             raise TimeoutError(f"Lock acquisition timed out: {path}")
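Usage sketch for LocalFileStore: the constructor argument (a root directory) is an assumption, while the lock semantics follow the implementation above (filelock.FileLock, so safe across threads and processes on a local disk, but not on NFS per the base-class note).

    from openhands.sdk.io.local import LocalFileStore

    store = LocalFileStore("/tmp/openhands-store")        # root dir argument is assumed
    with store.lock("events/events.lock", timeout=5.0):   # lock file created under the root
        print(store.exists("events"), store.get_absolute_path("events"))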
openhands/sdk/io/memory.py CHANGED
@@ -1,4 +1,8 @@
  import os
+ import threading
+ import uuid
+ from collections.abc import Iterator
+ from contextlib import contextmanager

  from openhands.sdk.io.base import FileStore
  from openhands.sdk.logger import get_logger
@@ -9,9 +13,13 @@ logger = get_logger(__name__)

  class InMemoryFileStore(FileStore):
      files: dict[str, str]
+     _instance_id: str
+     _lock: threading.Lock

      def __init__(self, files: dict[str, str] | None = None) -> None:
          self.files = {}
+         self._instance_id = uuid.uuid4().hex
+         self._lock = threading.Lock()
          if files is not None:
              self.files = files

@@ -51,4 +59,29 @@ class InMemoryFileStore(FileStore):
              del self.files[key]
              logger.debug(f"Cleared in-memory file store: {path}")
          except Exception as e:
-             logger.error(f"Error clearing in-memory file store: {str(e)}")
+             logger.error(f"Error clearing in-memory file store: {e}")
+
+     def exists(self, path: str) -> bool:
+         """Check if a file exists."""
+         if path in self.files:
+             return True
+         return any(f.startswith(path + "/") for f in self.files)
+
+     def get_absolute_path(self, path: str) -> str:
+         """Get absolute path (uses temp dir with unique instance ID)."""
+         import tempfile
+
+         return os.path.join(
+             tempfile.gettempdir(), f"openhands_inmemory_{self._instance_id}", path
+         )
+
+     @contextmanager
+     def lock(self, path: str, timeout: float = 30.0) -> Iterator[None]:
+         """Acquire thread lock for in-memory store."""
+         acquired = self._lock.acquire(timeout=timeout)
+         if not acquired:
+             raise TimeoutError(f"Lock acquisition timed out: {path}")
+         try:
+             yield
+         finally:
+             self._lock.release()
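And the in-memory counterpart, for completeness; note that a single threading.Lock guards the whole store regardless of the path argument, and get_absolute_path() only fabricates a per-instance temp path. The example paths are illustrative.

    from openhands.sdk.io.memory import InMemoryFileStore

    store = InMemoryFileStore({"conversations/abc/state.json": "{}"})
    store.exists("conversations/abc")   # True via the "conversations/abc/" prefix match
    store.get_absolute_path("x.json")   # e.g. /tmp/openhands_inmemory_<id>/x.json
    with store.lock("conversations/abc", timeout=1.0):
        ...  # exclusive only w.r.t. other lock() users of this same instance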
openhands/sdk/llm/llm.py CHANGED
@@ -424,8 +424,11 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin):
      ) -> None:
          if self.retry_listener is not None:
              self.retry_listener(attempt_number, num_retries, _err)
-         if self._telemetry is not None and _err is not None:
-             self._telemetry.on_error(_err)
+         # NOTE: don't call Telemetry.on_error here.
+         # This function runs for each retried failure (before the next attempt),
+         # which would create noisy duplicate error logs.
+         # The completion()/responses() exception handlers call Telemetry.on_error
+         # after retries are exhausted (final failure), which is what we want to log.

      # =========================================================================
      # Serializers
@@ -697,6 +700,7 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin):
          telemetry_ctx.update(
              {
                  "llm_path": "responses",
+                 "instructions": instructions,
                  "input": input_items[:],
                  "tools": tools,
                  "kwargs": {k: v for k, v in call_kwargs.items()},
openhands/sdk/llm/utils/model_features.py CHANGED
@@ -63,6 +63,9 @@ REASONING_EFFORT_MODELS: list[str] = [
      "o4-mini-2025-04-16",
      "gemini-2.5-flash",
      "gemini-2.5-pro",
+     # Gemini 3 family
+     "gemini-3-flash-preview",
+     "gemini-3-pro-preview",
      # OpenAI GPT-5 family (includes mini variants)
      "gpt-5",
      # Anthropic Opus 4.5
openhands/sdk/llm/utils/telemetry.py CHANGED
@@ -1,6 +1,7 @@
  import json
  import os
  import time
+ import traceback
  import uuid
  import warnings
  from collections.abc import Callable
@@ -121,7 +122,46 @@ class Telemetry(BaseModel):
          return self.metrics.deep_copy()

      def on_error(self, _err: BaseException) -> None:
-         # Stub for error tracking / counters
+         # Best-effort logging for failed requests (so we can debug malformed
+         # request payloads, e.g. orphaned Responses reasoning items).
+         self._last_latency = time.time() - (self._req_start or time.time())
+
+         if not self.log_enabled:
+             return
+         if not self.log_dir and not self._log_completions_callback:
+             return
+
+         try:
+             filename = (
+                 f"{self.model_name.replace('/', '__')}-"
+                 f"{time.time():.3f}-"
+                 f"{uuid.uuid4().hex[:4]}-error.json"
+             )
+
+             data = self._req_ctx.copy()
+             data["error"] = {
+                 "type": type(_err).__name__,
+                 "message": str(_err),
+                 "repr": repr(_err),
+                 "traceback": "".join(
+                     traceback.format_exception(type(_err), _err, _err.__traceback__)
+                 ),
+             }
+             data["timestamp"] = time.time()
+             data["latency_sec"] = self._last_latency
+             data["cost"] = 0.0
+
+             log_data = json.dumps(data, default=_safe_json, ensure_ascii=False)
+
+             if self._log_completions_callback:
+                 self._log_completions_callback(filename, log_data)
+             elif self.log_dir:
+                 os.makedirs(self.log_dir, exist_ok=True)
+                 fname = os.path.join(self.log_dir, filename)
+                 with open(fname, "w", encoding="utf-8") as f:
+                     f.write(log_data)
+         except Exception as e:
+             warnings.warn(f"Telemetry error logging failed: {e}")
          return

      # ---------- Helpers ----------
@@ -335,7 +375,6 @@ class Telemetry(BaseModel):
          os.makedirs(self.log_dir, exist_ok=True)
          if not os.access(self.log_dir, os.W_OK):
              raise PermissionError(f"log_dir is not writable: {self.log_dir}")
-
          fname = os.path.join(self.log_dir, filename)
          with open(fname, "w", encoding="utf-8") as f:
              f.write(log_data)
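For reference, an illustrative (not authoritative) sketch of the error record that on_error now writes, merged with whatever request context was already captured in _req_ctx ("llm_path", "input", "tools", ...); all values below are made up.

    # Keys mirror the implementation above; values are placeholders.
    error_record = {
        "llm_path": "responses",
        "error": {
            "type": "BadRequestError",
            "message": "...",
            "repr": "BadRequestError('...')",
            "traceback": "Traceback (most recent call last): ...",
        },
        "timestamp": 1735689600.123,
        "latency_sec": 2.4,
        "cost": 0.0,
    }
    # Written to <log_dir>/<model>-<time>-<4 hex chars>-error.json, or handed to the
    # log-completions callback when one is configured.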