openhands-sdk 1.7.0__py3-none-any.whl → 1.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. openhands/sdk/agent/agent.py +31 -1
  2. openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5-codex.j2 +1 -2
  3. openhands/sdk/agent/utils.py +9 -4
  4. openhands/sdk/context/condenser/base.py +11 -6
  5. openhands/sdk/context/condenser/llm_summarizing_condenser.py +167 -18
  6. openhands/sdk/context/condenser/no_op_condenser.py +2 -1
  7. openhands/sdk/context/condenser/pipeline_condenser.py +10 -9
  8. openhands/sdk/context/condenser/utils.py +149 -0
  9. openhands/sdk/context/skills/skill.py +85 -0
  10. openhands/sdk/context/view.py +234 -37
  11. openhands/sdk/conversation/conversation.py +6 -0
  12. openhands/sdk/conversation/impl/local_conversation.py +33 -3
  13. openhands/sdk/conversation/impl/remote_conversation.py +36 -0
  14. openhands/sdk/conversation/state.py +41 -1
  15. openhands/sdk/hooks/__init__.py +30 -0
  16. openhands/sdk/hooks/config.py +180 -0
  17. openhands/sdk/hooks/conversation_hooks.py +227 -0
  18. openhands/sdk/hooks/executor.py +155 -0
  19. openhands/sdk/hooks/manager.py +170 -0
  20. openhands/sdk/hooks/types.py +40 -0
  21. openhands/sdk/io/cache.py +85 -0
  22. openhands/sdk/io/local.py +39 -2
  23. openhands/sdk/llm/mixins/fn_call_converter.py +61 -16
  24. openhands/sdk/llm/mixins/non_native_fc.py +5 -1
  25. openhands/sdk/tool/schema.py +10 -0
  26. {openhands_sdk-1.7.0.dist-info → openhands_sdk-1.7.1.dist-info}/METADATA +1 -1
  27. {openhands_sdk-1.7.0.dist-info → openhands_sdk-1.7.1.dist-info}/RECORD +29 -21
  28. {openhands_sdk-1.7.0.dist-info → openhands_sdk-1.7.1.dist-info}/WHEEL +0 -0
  29. {openhands_sdk-1.7.0.dist-info → openhands_sdk-1.7.1.dist-info}/top_level.txt +0 -0
openhands/sdk/hooks/manager.py ADDED
@@ -0,0 +1,170 @@
+ """Hook manager - orchestrates hook execution within conversations."""
+
+ from typing import Any
+
+ from openhands.sdk.hooks.config import HookConfig
+ from openhands.sdk.hooks.executor import HookExecutor, HookResult
+ from openhands.sdk.hooks.types import HookEvent, HookEventType
+
+
+ class HookManager:
+     """Manages hook execution for a conversation."""
+
+     def __init__(
+         self,
+         config: HookConfig | None = None,
+         working_dir: str | None = None,
+         session_id: str | None = None,
+     ):
+         self.config = config or HookConfig.load(working_dir=working_dir)
+         self.executor = HookExecutor(working_dir=working_dir)
+         self.session_id = session_id
+         self.working_dir = working_dir
+
+     def _create_event(
+         self,
+         event_type: HookEventType,
+         tool_name: str | None = None,
+         tool_input: dict[str, Any] | None = None,
+         tool_response: dict[str, Any] | None = None,
+         message: str | None = None,
+         metadata: dict[str, Any] | None = None,
+     ) -> HookEvent:
+         """Create a hook event with common fields populated."""
+         return HookEvent(
+             event_type=event_type,
+             tool_name=tool_name,
+             tool_input=tool_input,
+             tool_response=tool_response,
+             message=message,
+             session_id=self.session_id,
+             working_dir=self.working_dir,
+             metadata=metadata or {},
+         )
+
+     def run_pre_tool_use(
+         self,
+         tool_name: str,
+         tool_input: dict[str, Any],
+     ) -> tuple[bool, list[HookResult]]:
+         """Run PreToolUse hooks. Returns (should_continue, results)."""
+         hooks = self.config.get_hooks_for_event(HookEventType.PRE_TOOL_USE, tool_name)
+         if not hooks:
+             return True, []
+
+         event = self._create_event(
+             HookEventType.PRE_TOOL_USE,
+             tool_name=tool_name,
+             tool_input=tool_input,
+         )
+
+         results = self.executor.execute_all(hooks, event, stop_on_block=True)
+
+         # Check if any hook blocked the operation
+         should_continue = all(r.should_continue for r in results)
+
+         return should_continue, results
+
+     def run_post_tool_use(
+         self,
+         tool_name: str,
+         tool_input: dict[str, Any],
+         tool_response: dict[str, Any],
+     ) -> list[HookResult]:
+         """Run PostToolUse hooks after a tool completes."""
+         hooks = self.config.get_hooks_for_event(HookEventType.POST_TOOL_USE, tool_name)
+         if not hooks:
+             return []
+
+         event = self._create_event(
+             HookEventType.POST_TOOL_USE,
+             tool_name=tool_name,
+             tool_input=tool_input,
+             tool_response=tool_response,
+         )
+
+         # PostToolUse hooks don't block - they just run
+         return self.executor.execute_all(hooks, event, stop_on_block=False)
+
+     def run_user_prompt_submit(
+         self,
+         message: str,
+     ) -> tuple[bool, str | None, list[HookResult]]:
+         """Run UserPromptSubmit hooks."""
+         hooks = self.config.get_hooks_for_event(HookEventType.USER_PROMPT_SUBMIT)
+         if not hooks:
+             return True, None, []
+
+         event = self._create_event(
+             HookEventType.USER_PROMPT_SUBMIT,
+             message=message,
+         )
+
+         results = self.executor.execute_all(hooks, event, stop_on_block=True)
+
+         # Check if any hook blocked
+         should_continue = all(r.should_continue for r in results)
+
+         # Collect additional context from hooks
+         additional_context_parts = [
+             r.additional_context for r in results if r.additional_context
+         ]
+         additional_context = (
+             "\n".join(additional_context_parts) if additional_context_parts else None
+         )
+
+         return should_continue, additional_context, results
+
+     def run_session_start(self) -> list[HookResult]:
+         """Run SessionStart hooks when a conversation begins."""
+         hooks = self.config.get_hooks_for_event(HookEventType.SESSION_START)
+         if not hooks:
+             return []
+
+         event = self._create_event(HookEventType.SESSION_START)
+         return self.executor.execute_all(hooks, event, stop_on_block=False)
+
+     def run_session_end(self) -> list[HookResult]:
+         """Run SessionEnd hooks when a conversation ends."""
+         hooks = self.config.get_hooks_for_event(HookEventType.SESSION_END)
+         if not hooks:
+             return []
+
+         event = self._create_event(HookEventType.SESSION_END)
+         return self.executor.execute_all(hooks, event, stop_on_block=False)
+
+     def run_stop(
+         self,
+         reason: str | None = None,
+     ) -> tuple[bool, list[HookResult]]:
+         """Run Stop hooks. Returns (should_stop, results)."""
+         hooks = self.config.get_hooks_for_event(HookEventType.STOP)
+         if not hooks:
+             return True, []
+
+         event = self._create_event(
+             HookEventType.STOP,
+             metadata={"reason": reason} if reason else {},
+         )
+
+         results = self.executor.execute_all(hooks, event, stop_on_block=True)
+
+         # If a hook blocks, the agent should NOT stop (continue running)
+         should_stop = all(r.should_continue for r in results)
+
+         return should_stop, results
+
+     def has_hooks(self, event_type: HookEventType) -> bool:
+         """Check if there are hooks configured for an event type."""
+         return self.config.has_hooks_for_event(event_type)
+
+     def get_blocking_reason(self, results: list[HookResult]) -> str | None:
+         """Get the reason for blocking from hook results."""
+         for result in results:
+             if result.blocked:
+                 if result.reason:
+                     return result.reason
+                 if result.stderr:
+                     return result.stderr.strip()
+                 return "Blocked by hook"
+         return None
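For orientation, a minimal usage sketch of the new HookManager, based only on the methods shown above. The tool name and inputs are illustrative, and the on-disk hook configuration read by HookConfig.load() is defined in openhands/sdk/hooks/config.py, which is not reproduced in this section.

    from openhands.sdk.hooks.manager import HookManager

    # Hooks are discovered via HookConfig.load(working_dir=...) when no config is passed.
    manager = HookManager(working_dir="/tmp/project", session_id="demo-session")

    # Gate a tool call on PreToolUse hooks ("terminal" and the command are illustrative).
    ok, results = manager.run_pre_tool_use(
        tool_name="terminal",
        tool_input={"command": "rm -rf build/"},
    )
    if not ok:
        print("Blocked:", manager.get_blocking_reason(results))

    # PostToolUse hooks never block; they just run after the tool returns.
    manager.run_post_tool_use(
        tool_name="terminal",
        tool_input={"command": "ls"},
        tool_response={"output": "README.md"},
    )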
openhands/sdk/hooks/types.py ADDED
@@ -0,0 +1,40 @@
+ """Hook event types and data structures."""
+
+ from enum import Enum
+ from typing import Any
+
+ from pydantic import BaseModel, Field
+
+
+ class HookEventType(str, Enum):
+     """Types of hook events that can trigger hooks."""
+
+     PRE_TOOL_USE = "PreToolUse"
+     POST_TOOL_USE = "PostToolUse"
+     USER_PROMPT_SUBMIT = "UserPromptSubmit"
+     SESSION_START = "SessionStart"
+     SESSION_END = "SessionEnd"
+     STOP = "Stop"
+
+
+ class HookEvent(BaseModel):
+     """Data passed to hook scripts via stdin as JSON."""
+
+     event_type: HookEventType
+     tool_name: str | None = None
+     tool_input: dict[str, Any] | None = None
+     tool_response: dict[str, Any] | None = None
+     message: str | None = None
+     session_id: str | None = None
+     working_dir: str | None = None
+     metadata: dict[str, Any] = Field(default_factory=dict)
+
+     model_config = {"use_enum_values": True}
+
+
+ class HookDecision(str, Enum):
+     """Decisions a hook can make about an operation."""
+
+     ALLOW = "allow"
+     DENY = "deny"
+     # ASK = "ask"  # Future: prompt user for confirmation before proceeding
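As a rough illustration (not part of the diff), this is how a HookEvent would be built and serialized to the JSON a hook script reads from stdin; all field values here are made up.

    from openhands.sdk.hooks.types import HookEvent, HookEventType

    event = HookEvent(
        event_type=HookEventType.PRE_TOOL_USE,
        tool_name="terminal",
        tool_input={"command": "pytest -q"},
        session_id="demo-session",
        working_dir="/tmp/project",
    )
    # With use_enum_values enabled, event_type serializes as the plain string "PreToolUse".
    print(event.model_dump_json(indent=2))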
openhands/sdk/io/cache.py ADDED
@@ -0,0 +1,85 @@
+ from typing import Any
+
+ from cachetools import LRUCache
+
+ from openhands.sdk.logger import get_logger
+
+
+ logger = get_logger(__name__)
+
+
+ class MemoryLRUCache(LRUCache):
+     """LRU cache with both entry count and memory size limits.
+
+     This cache enforces two limits:
+     1. Maximum number of entries (maxsize)
+     2. Maximum memory usage in bytes (max_memory)
+
+     When either limit is exceeded, the least recently used items are evicted.
+
+     Note: Memory tracking is based on string length for simplicity and accuracy.
+     For non-string values, sys.getsizeof is used as a rough approximation.
+     """
+
+     def __init__(self, max_memory: int, max_size: int, *args, **kwargs):
+         # Ensure minimum maxsize of 1 to avoid LRUCache issues
+         maxsize = max(1, max_size)
+         super().__init__(maxsize=maxsize, *args, **kwargs)
+         self.max_memory = max_memory
+         self.current_memory = 0
+
+     def _get_size(self, value: Any) -> int:
+         """Calculate size of value for memory tracking.
+
+         For strings (the common case in FileStore), we use len() which gives
+         accurate character count. For other types, we use sys.getsizeof() as
+         a rough approximation.
+         """
+         if isinstance(value, str):
+             # For strings, len() gives character count which is what we care about
+             # This is much more accurate than sys.getsizeof for our use case
+             return len(value)
+         elif isinstance(value, bytes):
+             return len(value)
+         else:
+             # For other types, fall back to sys.getsizeof
+             # This is mainly for edge cases and won't be accurate for nested
+             # structures, but it's better than nothing
+             try:
+                 import sys
+
+                 return sys.getsizeof(value)
+             except Exception:
+                 return 0
+
+     def __setitem__(self, key: Any, value: Any) -> None:
+         new_size = self._get_size(value)
+
+         # Don't cache items that are larger than max_memory
+         # This prevents cache thrashing where one huge item evicts everything
+         if new_size > self.max_memory:
+             logger.debug(
+                 f"Item too large for cache ({new_size} bytes > "
+                 f"{self.max_memory} bytes), skipping cache"
+             )
+             return
+
+         # Update memory accounting if key exists
+         if key in self:
+             old_value = self[key]
+             self.current_memory -= self._get_size(old_value)
+
+         self.current_memory += new_size
+
+         # Evict items until we're under memory limit
+         while self.current_memory > self.max_memory and len(self) > 0:
+             self.popitem()
+
+         super().__setitem__(key, value)
+
+     def __delitem__(self, key: Any) -> None:
+         if key in self:
+             old_value = self[key]
+             self.current_memory -= self._get_size(old_value)
+
+         super().__delitem__(key)
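A quick sketch (not from the package) of how the two limits interact, with tiny arbitrary numbers chosen to force eviction:

    from openhands.sdk.io.cache import MemoryLRUCache

    cache = MemoryLRUCache(max_memory=10, max_size=5)
    cache["a"] = "12345"       # 5 chars counted against the 10-char memory budget
    cache["b"] = "123456"      # would push usage to 11, so the LRU entry "a" is evicted
    assert "a" not in cache and "b" in cache
    cache["big"] = "x" * 100   # larger than max_memory: silently skipped, never cached
    assert "big" not in cache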
openhands/sdk/io/local.py CHANGED
@@ -1,6 +1,7 @@
  import os
  import shutil

+ from openhands.sdk.io.cache import MemoryLRUCache
  from openhands.sdk.logger import get_logger
  from openhands.sdk.observability.laminar import observe

@@ -12,13 +13,31 @@ logger = get_logger(__name__)

  class LocalFileStore(FileStore):
      root: str
+     cache: MemoryLRUCache

-     def __init__(self, root: str):
+     def __init__(
+         self,
+         root: str,
+         cache_limit_size: int = 500,
+         cache_memory_size: int = 20 * 1024 * 1024,
+     ) -> None:
+         """Initialize a LocalFileStore with caching.
+
+         Args:
+             root: Root directory for file storage.
+             cache_limit_size: Maximum number of cached entries (default: 500).
+             cache_memory_size: Maximum cache memory in bytes (default: 20MB).
+
+         Note:
+             The cache assumes exclusive access to files. External modifications
+             to files will not be detected and may result in stale cache reads.
+         """
          if root.startswith("~"):
              root = os.path.expanduser(root)
          root = os.path.abspath(os.path.normpath(root))
          self.root = root
          os.makedirs(self.root, exist_ok=True)
+         self.cache = MemoryLRUCache(cache_memory_size, cache_limit_size)

      def get_full_path(self, path: str) -> str:
          # strip leading slash to keep relative under root
@@ -32,6 +51,7 @@ class LocalFileStore(FileStore):
          # ensure sandboxing
          if os.path.commonpath([self.root, full]) != self.root:
              raise ValueError(f"path escapes filestore root: {path}")
+
          return full

      @observe(name="LocalFileStore.write", span_type="TOOL")
@@ -41,14 +61,27 @@
          if isinstance(contents, str):
              with open(full_path, "w", encoding="utf-8") as f:
                  f.write(contents)
+             self.cache[full_path] = contents
          else:
              with open(full_path, "wb") as f:
                  f.write(contents)
+             # Don't cache binary content - LocalFileStore is meant for JSON data
+             # If binary data is written and then read, it will error on read

      def read(self, path: str) -> str:
          full_path = self.get_full_path(path)
+
+         if full_path in self.cache:
+             return self.cache[full_path]
+
+         if not os.path.exists(full_path):
+             raise FileNotFoundError(path)
+
          with open(full_path, encoding="utf-8") as f:
-             return f.read()
+             result = f.read()
+
+         self.cache[full_path] = result
+         return result

      @observe(name="LocalFileStore.list", span_type="TOOL")
      def list(self, path: str) -> list[str]:
@@ -72,11 +105,15 @@
              if not os.path.exists(full_path):
                  logger.debug(f"Local path does not exist: {full_path}")
                  return
+
              if os.path.isfile(full_path):
                  os.remove(full_path)
+                 del self.cache[full_path]
                  logger.debug(f"Removed local file: {full_path}")
              elif os.path.isdir(full_path):
                  shutil.rmtree(full_path)
+                 self.cache.clear()
                  logger.debug(f"Removed local directory: {full_path}")
+
          except Exception as e:
              logger.error(f"Error clearing local file store: {str(e)}")
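A minimal sketch of the read-through cache added here, assuming the existing FileStore write(path, contents) / read(path) signatures; the 500-entry / 20 MB defaults come from the constructor above, and the path and contents are illustrative.

    from openhands.sdk.io.local import LocalFileStore

    store = LocalFileStore("/tmp/oh-store", cache_limit_size=100, cache_memory_size=1024 * 1024)
    store.write("state.json", '{"kind": "message"}')  # string writes also populate the cache
    print(store.read("state.json"))                   # served from MemoryLRUCache, no disk read
    # Caveat from the docstring: the cache assumes exclusive access, so files
    # modified outside the store can be served stale.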
openhands/sdk/llm/mixins/fn_call_converter.py CHANGED
@@ -450,7 +450,8 @@ PLEASE follow the format strictly! PLEASE EMIT ONE AND ONLY ONE FUNCTION CALL PE
  """  # noqa: E501

  # Regex patterns for function call parsing
- FN_REGEX_PATTERN = r"<function=([^>]+)>\n(.*?)</function>"
+ # Note: newline after function name is optional for compatibility with various models
+ FN_REGEX_PATTERN = r"<function=([^>]+)>\n?(.*?)</function>"
  FN_PARAM_REGEX_PATTERN = r"<parameter=([^>]+)>(.*?)</parameter>"

  # Add new regex pattern for tool execution results
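A small standalone check (not from the package) of what the relaxed pattern now accepts: the newline after the function name is optional.

    import re

    FN_REGEX_PATTERN = r"<function=([^>]+)>\n?(.*?)</function>"
    for text in (
        "<function=terminal>\n<parameter=command>ls</parameter>\n</function>",
        "<function=terminal><parameter=command>ls</parameter></function>",  # no newline emitted
    ):
        match = re.search(FN_REGEX_PATTERN, text, re.DOTALL)
        assert match is not None and match.group(1) == "terminal"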
@@ -702,7 +703,7 @@ def convert_fncall_messages_to_non_fncall_messages(
      first_user_message_encountered = False
      for message in messages:
          role = message["role"]
-         content: Content = message["content"]
+         content: Content = message.get("content") or ""

          # 1. SYSTEM MESSAGES
          # append system prompt suffix to content
@@ -880,6 +881,9 @@ def _extract_and_validate_params(
      for param_match in param_matches:
          param_name = param_match.group(1)
          param_value = param_match.group(2)
+         # Normalize whitespace: some models add extra newlines around values
+         if isinstance(param_value, str):
+             param_value = param_value.strip()

          # Validate parameter is allowed
          if allowed_params and param_name not in allowed_params:
@@ -927,7 +931,11 @@
          found_params.add(param_name)

      # Check all required parameters are present
-     missing_params = required_params - found_params
+     # Note: security_risk is excluded here because its validation happens later
+     # in Agent._extract_security_risk(), which has context about whether a security
+     # analyzer is configured. This allows weaker models to omit it when no analyzer
+     # is active, while still enforcing it for stronger models with LLMSecurityAnalyzer.
+     missing_params = required_params - found_params - {"security_risk"}
      if missing_params:
          raise FunctionCallValidationError(
              f"Missing required parameters for function '{fn_name}': {missing_params}"
@@ -935,12 +943,31 @@
      return params


+ def _preprocess_model_output(content: str) -> str:
+     """Clean up model-specific formatting before parsing function calls.
+
+     Removes wrapper tags that some models (like Nemotron) emit around function calls:
+     - </think> before the function call
+     - <tool_call>...</tool_call> around the function call
+
+     Only strips tags at boundaries, not inside parameter values.
+     """
+     # Strip </think> when it appears before <function= (Nemotron reasoning end)
+     content = re.sub(r"</think>\s*(?=<function=)", "", content)
+     # Strip <tool_call> when it appears right before <function=
+     content = re.sub(r"<tool_call>\s*(?=<function=)", "", content)
+     # Strip </tool_call> when it appears right after </function>
+     content = re.sub(r"(?<=</function>)\s*</tool_call>", "", content)
+     return content
+
+
  def _fix_stopword(content: str) -> str:
      """Fix the issue when some LLM would NOT return the stopword."""
+     content = _preprocess_model_output(content)
      if "<function=" in content and content.count("<function=") == 1:
          if content.endswith("</"):
              content = content.rstrip() + "function>"
-         else:
+         elif not content.rstrip().endswith("</function>"):
              content = content + "\n</function>"
      return content

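Standalone illustration of the boundary stripping that _preprocess_model_output performs, using the same regexes outside the library; the sample reply is invented.

    import re

    raw = "<tool_call><function=terminal>\n<parameter=command>ls</parameter>\n</function></tool_call>"
    cleaned = re.sub(r"<tool_call>\s*(?=<function=)", "", raw)         # wrapper before the call
    cleaned = re.sub(r"(?<=</function>)\s*</tool_call>", "", cleaned)  # wrapper after the call
    assert cleaned.startswith("<function=") and cleaned.endswith("</function>")
    # _fix_stopword then leaves this content alone: the new elif only appends a
    # closing tag when the text does not already end with "</function>".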
@@ -981,8 +1008,8 @@ def convert_non_fncall_messages_to_fncall_messages(

      first_user_message_encountered = False
      for message in messages:
-         role, content = message["role"], message["content"]
-         content = content or ""  # handle cases where content is None
+         role = message["role"]
+         content = message.get("content") or ""
          # For system messages, remove the added suffix
          if role == "system":
              if isinstance(content, str):
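The motivation for switching to message.get(...) in these loops, shown as a tiny standalone check: assistant messages that only carry tool calls may omit "content" entirely or set it to None, and both now normalize to an empty string.

    for message in ({"role": "assistant", "tool_calls": []}, {"role": "assistant", "content": None}):
        content = message.get("content") or ""
        assert content == ""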
@@ -1124,15 +1151,32 @@
              if fn_match:
                  fn_name = fn_match.group(1)
                  fn_body = _normalize_parameter_tags(fn_match.group(2))
-                 matching_tool: ChatCompletionToolParamFunctionChunk | None = next(
-                     (
-                         tool["function"]
-                         for tool in tools
-                         if tool["type"] == "function"
-                         and tool["function"]["name"] == fn_name
-                     ),
-                     None,
-                 )
+
+                 def _find_tool(
+                     name: str,
+                 ) -> ChatCompletionToolParamFunctionChunk | None:
+                     return next(
+                         (
+                             tool["function"]
+                             for tool in tools
+                             if tool["type"] == "function"
+                             and tool["function"]["name"] == name
+                         ),
+                         None,
+                     )
+
+                 matching_tool = _find_tool(fn_name)
+                 # Try aliases if tool not found (some models use legacy names)
+                 if not matching_tool:
+                     TOOL_NAME_ALIASES = {
+                         "str_replace_editor": "file_editor",
+                         "bash": "terminal",
+                         "execute_bash": "terminal",
+                         "str_replace": "file_editor",
+                     }
+                     if fn_name in TOOL_NAME_ALIASES:
+                         fn_name = TOOL_NAME_ALIASES[fn_name]
+                         matching_tool = _find_tool(fn_name)
                  # Validate function exists in tools
                  if not matching_tool:
                      available_tools = [
@@ -1203,7 +1247,8 @@ def convert_from_multiple_tool_calls_to_single_tool_call_messages(
      for message in messages:
          role: str
          content: Content
-         role, content = message["role"], message["content"]
+         role = message["role"]
+         content = message.get("content") or ""
          if role == "assistant":
              if message.get("tool_calls") and len(message["tool_calls"]) > 1:
                  # handle multiple tool calls by breaking them into multiple messages
openhands/sdk/llm/mixins/non_native_fc.py CHANGED
@@ -41,7 +41,11 @@ class NonNativeToolCallingMixin:
          kwargs: dict,
      ) -> tuple[list[dict], dict]:
          """Convert to non-fncall prompting when native tool-calling is off."""
-         add_iclex = not any(s in self.model for s in ("openhands-lm", "devstral"))
+         # Skip in-context learning examples for models that understand the format
+         # or have limited context windows
+         add_iclex = not any(
+             s in self.model for s in ("openhands-lm", "devstral", "nemotron")
+         )
          messages = convert_fncall_messages_to_non_fncall_messages(
              messages, tools, add_in_context_learning_example=add_iclex
          )
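A sketch of the gate added here, pulled out as a standalone function for clarity; the model names used in the asserts are illustrative.

    def wants_icl_example(model: str) -> bool:
        # Mirrors the add_iclex check: these model families already know the format
        # (or are context-constrained), so the long in-context example is skipped.
        return not any(s in model for s in ("openhands-lm", "devstral", "nemotron"))

    assert wants_icl_example("gpt-5-codex") is True
    assert wants_icl_example("nemotron-nano") is False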
openhands/sdk/tool/schema.py CHANGED
@@ -22,6 +22,16 @@ S = TypeVar("S", bound="Schema")
  def py_type(spec: dict[str, Any]) -> Any:
      """Map JSON schema types to Python types."""
      t = spec.get("type")
+
+     # Normalize union types like ["string", "null"] to a single representative type.
+     # MCP schemas often mark optional fields this way; we keep the non-null type.
+     if isinstance(t, (list, tuple, set)):
+         types = list(t)
+         non_null = [tp for tp in types if tp != "null"]
+         if len(non_null) == 1:
+             t = non_null[0]
+         else:
+             return Any
      if t == "array":
          items = spec.get("items", {})
          inner = py_type(items) if isinstance(items, dict) else Any
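Standalone sketch of the normalization step added to py_type: a ["<type>", "null"] union collapses to its non-null member (which then flows into the existing type mapping), while anything more mixed falls back to Any.

    from typing import Any

    def normalize_union(t):
        # Same logic as the new branch in py_type, isolated for illustration.
        if isinstance(t, (list, tuple, set)):
            non_null = [tp for tp in t if tp != "null"]
            return non_null[0] if len(non_null) == 1 else Any
        return t

    assert normalize_union(["string", "null"]) == "string"
    assert normalize_union(["string", "integer"]) is Any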
{openhands_sdk-1.7.0.dist-info → openhands_sdk-1.7.1.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: openhands-sdk
- Version: 1.7.0
+ Version: 1.7.1
  Summary: OpenHands SDK - Core functionality for building AI agents
  Requires-Python: >=3.12
  Requires-Dist: deprecation>=2.1.0