openhands-sdk 1.9.1__py3-none-any.whl → 1.10.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (34)
  1. openhands/sdk/agent/agent.py +54 -13
  2. openhands/sdk/agent/base.py +32 -45
  3. openhands/sdk/context/condenser/llm_summarizing_condenser.py +0 -23
  4. openhands/sdk/context/condenser/prompts/summarizing_prompt.j2 +1 -5
  5. openhands/sdk/context/view.py +108 -122
  6. openhands/sdk/conversation/__init__.py +2 -0
  7. openhands/sdk/conversation/conversation.py +13 -3
  8. openhands/sdk/conversation/exceptions.py +18 -0
  9. openhands/sdk/conversation/impl/local_conversation.py +192 -23
  10. openhands/sdk/conversation/impl/remote_conversation.py +141 -12
  11. openhands/sdk/critic/impl/api/critic.py +10 -7
  12. openhands/sdk/event/condenser.py +52 -2
  13. openhands/sdk/git/cached_repo.py +19 -0
  14. openhands/sdk/hooks/__init__.py +2 -0
  15. openhands/sdk/hooks/config.py +44 -4
  16. openhands/sdk/hooks/executor.py +2 -1
  17. openhands/sdk/llm/llm.py +47 -13
  18. openhands/sdk/llm/message.py +65 -27
  19. openhands/sdk/llm/options/chat_options.py +2 -1
  20. openhands/sdk/mcp/client.py +53 -6
  21. openhands/sdk/mcp/tool.py +24 -21
  22. openhands/sdk/mcp/utils.py +31 -23
  23. openhands/sdk/plugin/__init__.py +12 -1
  24. openhands/sdk/plugin/fetch.py +118 -14
  25. openhands/sdk/plugin/loader.py +111 -0
  26. openhands/sdk/plugin/plugin.py +155 -13
  27. openhands/sdk/plugin/types.py +163 -1
  28. openhands/sdk/utils/__init__.py +2 -0
  29. openhands/sdk/utils/async_utils.py +36 -1
  30. openhands/sdk/utils/command.py +28 -1
  31. {openhands_sdk-1.9.1.dist-info → openhands_sdk-1.10.0.dist-info}/METADATA +1 -1
  32. {openhands_sdk-1.9.1.dist-info → openhands_sdk-1.10.0.dist-info}/RECORD +34 -33
  33. {openhands_sdk-1.9.1.dist-info → openhands_sdk-1.10.0.dist-info}/WHEEL +1 -1
  34. {openhands_sdk-1.9.1.dist-info → openhands_sdk-1.10.0.dist-info}/top_level.txt +0 -0
openhands/sdk/event/condenser.py CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from pydantic import Field
 from rich.text import Text
 
@@ -22,8 +24,9 @@ class Condensation(Event):
     summary_offset: int | None = Field(
         default=None,
         ge=0,
-        description="An optional offset to the start of the resulting view"
-        " indicating where the summary should be inserted.",
+        description="An optional offset to the start of the resulting view (after"
+        " forgotten events have been removed) indicating where the summary should be"
+        " inserted. If not provided, the summary will not be inserted into the view.",
     )
     llm_response_id: EventID = Field(
         description=(
@@ -45,6 +48,53 @@ class Condensation(Event):
         text.append(f"{self.summary}\n")
         return text
 
+    @property
+    def summary_event(self) -> CondensationSummaryEvent:
+        """Generates a CondensationSummaryEvent.
+
+        Since summary events are not part of the main event store and are generated
+        dynamically, this property ensures the created event has a unique and
+        consistent ID based on the condensation event's ID.
+
+        Raises:
+            ValueError: If no summary is present.
+        """
+        if self.summary is None:
+            raise ValueError("No summary present to generate CondensationSummaryEvent.")
+
+        # Create a deterministic ID for the summary event.
+        # This ID will be unique amongst all auto-generated IDs (by virtue of the
+        # "-summary" suffix).
+        # These events are not intended to be stored alongside regular events, but the
+        # ID is still compatible with the file-based event store.
+        summary_id = f"{self.id}-summary"
+
+        return CondensationSummaryEvent(
+            id=summary_id,
+            summary=self.summary,
+            source=self.source,
+        )
+
+    @property
+    def has_summary_metadata(self) -> bool:
+        """Checks if both summary and summary_offset are present."""
+        return self.summary is not None and self.summary_offset is not None
+
+    def apply(self, events: list[LLMConvertibleEvent]) -> list[LLMConvertibleEvent]:
+        """Applies the condensation to a list of events.
+
+        This method removes events that are marked to be forgotten and returns a new
+        list of events. If the summary metadata is present (both summary and offset),
+        the corresponding CondensationSummaryEvent will be inserted at the specified
+        offset _after_ the forgotten events have been removed.
+        """
+        output = [event for event in events if event.id not in self.forgotten_event_ids]
+        if self.has_summary_metadata:
+            assert self.summary_offset is not None
+            summary_event = self.summary_event
+            output.insert(self.summary_offset, summary_event)
+        return output
+
 
 class CondensationRequest(Event):
     """This action is used to request a condensation of the conversation history.
openhands/sdk/git/cached_repo.py CHANGED
@@ -170,6 +170,25 @@ class GitHelper:
         # origin/HEAD may not be set (e.g., bare clone, or never configured)
         return None
 
+    def get_head_commit(self, repo_path: Path, timeout: int = 10) -> str:
+        """Get the current HEAD commit SHA.
+
+        Args:
+            repo_path: Path to the repository.
+            timeout: Timeout in seconds.
+
+        Returns:
+            Full 40-character commit SHA of HEAD.
+
+        Raises:
+            GitCommandError: If command fails.
+        """
+        return run_git_command(
+            ["git", "rev-parse", "HEAD"],
+            cwd=repo_path,
+            timeout=timeout,
+        )
+
 
 def try_cached_clone_or_update(
     url: str,
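
A short usage sketch for the new helper; how GitHelper is constructed is not shown in this diff, so the no-argument instantiation below is an assumption:

    from pathlib import Path

    helper = GitHelper()  # assumed constructor
    sha = helper.get_head_commit(Path("/workspace/repo"), timeout=10)
    assert len(sha) == 40  # full SHA, per the docstring
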
openhands/sdk/hooks/__init__.py CHANGED
@@ -6,6 +6,7 @@ during agent execution, enabling deterministic control over agent behavior.
 """
 
 from openhands.sdk.hooks.config import (
+    HOOK_EVENT_FIELDS,
     HookConfig,
     HookDefinition,
     HookMatcher,
@@ -21,6 +22,7 @@ from openhands.sdk.hooks.types import HookDecision, HookEvent, HookEventType
 
 
 __all__ = [
+    "HOOK_EVENT_FIELDS",
    "HookConfig",
    "HookDefinition",
    "HookMatcher",
openhands/sdk/hooks/config.py CHANGED
@@ -22,8 +22,9 @@ def _pascal_to_snake(name: str) -> str:
     return result
 
 
-# Valid snake_case field names for hook events
-_VALID_HOOK_FIELDS: frozenset[str] = frozenset(
+# Valid snake_case field names for hook events.
+# This is the single source of truth for hook event types.
+HOOK_EVENT_FIELDS: frozenset[str] = frozenset(
     {
         "pre_tool_use",
         "post_tool_use",
@@ -188,8 +189,8 @@ class HookConfig(BaseModel):
 
         if is_pascal_case:
             # Validate that PascalCase key maps to a known field
-            if snake_key not in _VALID_HOOK_FIELDS:
-                valid_types = ", ".join(sorted(_VALID_HOOK_FIELDS))
+            if snake_key not in HOOK_EVENT_FIELDS:
+                valid_types = ", ".join(sorted(HOOK_EVENT_FIELDS))
                 raise ValueError(
                     f"Unknown event type '{key}'. Valid types: {valid_types}"
                 )
@@ -287,3 +288,42 @@ class HookConfig(BaseModel):
 
         with open(path, "w") as f:
             json.dump(self.model_dump(mode="json", exclude_defaults=True), f, indent=2)
+
+    @classmethod
+    def merge(cls, configs: list["HookConfig"]) -> "HookConfig | None":
+        """Merge multiple hook configs by concatenating handlers per event type.
+
+        Each hook config may have multiple event types (pre_tool_use,
+        post_tool_use, etc.). This method combines all matchers from all
+        configs for each event type.
+
+        Args:
+            configs: List of HookConfig objects to merge.
+
+        Returns:
+            A merged HookConfig with all matchers concatenated, or None if no
+            configs or if the result is empty.
+
+        Example:
+            >>> config1 = HookConfig(pre_tool_use=[HookMatcher(matcher="*")])
+            >>> config2 = HookConfig(pre_tool_use=[HookMatcher(matcher="terminal")])
+            >>> merged = HookConfig.merge([config1, config2])
+            >>> len(merged.pre_tool_use)  # Both matchers combined
+            2
+        """
+        if not configs:
+            return None
+
+        # Collect all matchers by event type using the canonical field list
+        collected: dict[str, list] = {field: [] for field in HOOK_EVENT_FIELDS}
+        for config in configs:
+            for field in HOOK_EVENT_FIELDS:
+                collected[field].extend(getattr(config, field))
+
+        merged = cls(**collected)
+
+        # Return None if the merged config is empty
+        if merged.is_empty():
+            return None
+
+        return merged
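
A sketch of merge() in practice, for example combining hook configs from two sources (the two-source scenario is illustrative; the API calls mirror the docstring example above):

    user_cfg = HookConfig(pre_tool_use=[HookMatcher(matcher="*")])
    project_cfg = HookConfig(
        pre_tool_use=[HookMatcher(matcher="terminal")],
        post_tool_use=[HookMatcher(matcher="*")],
    )

    merged = HookConfig.merge([user_cfg, project_cfg])
    # Matchers are concatenated per event type, in list order.
    assert merged is not None and len(merged.pre_tool_use) == 2

    HookConfig.merge([])              # -> None: no configs given
    HookConfig.merge([HookConfig()])  # -> None: merged result is empty
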
openhands/sdk/hooks/executor.py CHANGED
@@ -8,6 +8,7 @@ from pydantic import BaseModel
 
 from openhands.sdk.hooks.config import HookDefinition
 from openhands.sdk.hooks.types import HookDecision, HookEvent
+from openhands.sdk.utils import sanitized_env
 
 
 class HookResult(BaseModel):
@@ -50,7 +51,7 @@ class HookExecutor:
     ) -> HookResult:
         """Execute a single hook."""
         # Prepare environment
-        hook_env = os.environ.copy()
+        hook_env = sanitized_env()
         hook_env["OPENHANDS_PROJECT_DIR"] = self.working_dir
         hook_env["OPENHANDS_SESSION_ID"] = event.session_id or ""
         hook_env["OPENHANDS_EVENT_TYPE"] = event.event_type
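
The implementation of sanitized_env() is not part of this diff (it lives in openhands/sdk/utils, whose changes are listed in the file summary above); the switch away from a raw os.environ copy suggests hook subprocesses should no longer inherit sensitive variables. A rough sketch of that idea, with an entirely assumed filtering rule:

    import os

    # Assumed deny-list; the real sanitized_env() may filter differently.
    _SENSITIVE_MARKERS = ("API_KEY", "TOKEN", "SECRET")

    def sanitized_env_sketch() -> dict[str, str]:
        # Copy the environment, dropping anything that looks like a credential.
        return {
            k: v
            for k, v in os.environ.items()
            if not any(marker in k.upper() for marker in _SENSITIVE_MARKERS)
        }
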
openhands/sdk/llm/llm.py CHANGED
@@ -22,6 +22,7 @@ from pydantic import (
 from pydantic.json_schema import SkipJsonSchema
 
 from openhands.sdk.llm.utils.model_info import get_litellm_model_info
+from openhands.sdk.utils.deprecation import warn_deprecated
 from openhands.sdk.utils.pydantic_secrets import serialize_secret, validate_secret
 
 
@@ -283,10 +284,15 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin):
     seed: int | None = Field(
         default=None, description="The seed to use for random number generation."
     )
+    # REMOVE_AT: 1.15.0 - Remove this field and its handling in chat_options.py
     safety_settings: list[dict[str, str]] | None = Field(
         default=None,
         description=(
-            "Safety settings for models that support them (like Mistral AI and Gemini)"
+            "Deprecated: Safety settings for models that support them "
+            "(like Mistral AI and Gemini). This field is deprecated in 1.10.0 "
+            "and will be removed in 1.15.0. Safety settings are designed for "
+            "consumer-facing content moderation, which is not relevant for "
+            "coding agents."
         ),
     )
     usage_id: str = Field(
@@ -342,6 +348,26 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin):
     def _validate_secrets(cls, v: str | SecretStr | None, info) -> SecretStr | None:
         return validate_secret(v, info)
 
+    # REMOVE_AT: 1.15.0 - Remove this validator
+    @field_validator("safety_settings", mode="before")
+    @classmethod
+    def _warn_safety_settings_deprecated(
+        cls, v: list[dict[str, str]] | None
+    ) -> list[dict[str, str]] | None:
+        """Emit deprecation warning when safety_settings is explicitly set."""
+        if v is not None:
+            warn_deprecated(
+                "LLM.safety_settings",
+                deprecated_in="1.10.0",
+                removed_in="1.15.0",
+                details=(
+                    "Safety settings are designed for consumer-facing content "
+                    "moderation, which is not relevant for coding agents."
+                ),
+                stacklevel=4,
+            )
+        return v
+
     @model_validator(mode="before")
     @classmethod
     def _coerce_inputs(cls, data):
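
With this validator, explicitly setting safety_settings should now warn at construction time. A sketch (the warning category emitted by warn_deprecated is not shown in this diff, and the constructor arguments are illustrative and possibly incomplete):

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        llm = LLM(
            model="gemini/gemini-2.0-flash",  # illustrative model name
            safety_settings=[
                {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"}
            ],
        )
    assert any("LLM.safety_settings" in str(w.message) for w in caught)
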
@@ -989,19 +1015,27 @@
         if self.is_caching_prompt_active():
             self._apply_prompt_caching(messages)
 
-        for message in messages:
-            message.cache_enabled = self.is_caching_prompt_active()
-            message.vision_enabled = self.vision_is_active()
-            message.function_calling_enabled = self.native_tool_calling
-            model_features = get_features(self._model_name_for_capabilities())
-            message.force_string_serializer = (
-                self.force_string_serializer
-                if self.force_string_serializer is not None
-                else model_features.force_string_serializer
+        model_features = get_features(self._model_name_for_capabilities())
+        cache_enabled = self.is_caching_prompt_active()
+        vision_enabled = self.vision_is_active()
+        function_calling_enabled = self.native_tool_calling
+        force_string_serializer = (
+            self.force_string_serializer
+            if self.force_string_serializer is not None
+            else model_features.force_string_serializer
+        )
+        send_reasoning_content = model_features.send_reasoning_content
+
+        formatted_messages = [
+            message.to_chat_dict(
+                cache_enabled=cache_enabled,
+                vision_enabled=vision_enabled,
+                function_calling_enabled=function_calling_enabled,
+                force_string_serializer=force_string_serializer,
+                send_reasoning_content=send_reasoning_content,
             )
-            message.send_reasoning_content = model_features.send_reasoning_content
-
-        formatted_messages = [message.to_chat_dict() for message in messages]
+            for message in messages
+        ]
 
         return formatted_messages
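
The per-message mutation loop is gone: per-request flags are now computed once and passed into each serialization call. Callers that go through the LLM should not need changes, e.g. (illustrative call site; the method shown is the one named by the deprecation text in message.py below):

    messages = [Message(role="user", content=[TextContent(text="hello")])]
    payload = llm.format_messages_for_llm(messages)  # flags handled internally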
 
openhands/sdk/llm/message.py CHANGED
@@ -11,10 +11,11 @@ from litellm.types.responses.main import (
 from litellm.types.utils import Message as LiteLLMMessage
 from openai.types.responses.response_output_message import ResponseOutputMessage
 from openai.types.responses.response_reasoning_item import ResponseReasoningItem
-from pydantic import BaseModel, ConfigDict, Field, field_validator
+from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
 
 from openhands.sdk.logger import get_logger
 from openhands.sdk.utils import DEFAULT_TEXT_CONTENT_LIMIT, maybe_truncate
+from openhands.sdk.utils.deprecation import warn_deprecated
 
 
 logger = get_logger(__name__)
@@ -209,30 +210,11 @@ class Message(BaseModel):
     # These are the roles in the LLM's APIs
     role: Literal["user", "system", "assistant", "tool"]
     content: Sequence[TextContent | ImageContent] = Field(default_factory=list)
-    cache_enabled: bool = False
-    vision_enabled: bool = False
-    # function calling
-    function_calling_enabled: bool = False
     # - tool calls (from LLM)
     tool_calls: list[MessageToolCall] | None = None
     # - tool execution result (to LLM)
     tool_call_id: str | None = None
     name: str | None = None  # name of the tool
-    force_string_serializer: bool = Field(
-        default=False,
-        description=(
-            "Force using string content serializer when sending to LLM API. "
-            "Useful for providers that do not support list content, "
-            "like HuggingFace and Groq."
-        ),
-    )
-    send_reasoning_content: bool = Field(
-        default=False,
-        description=(
-            "Whether to include the full reasoning content when sending to the LLM. "
-            "Useful for models that support extended reasoning, like Kimi-K2-thinking."
-        ),
-    )
     # reasoning content (from reasoning models like o1, Claude thinking, DeepSeek R1)
     reasoning_content: str | None = Field(
         default=None,
@@ -249,6 +231,47 @@ class Message(BaseModel):
         description="OpenAI Responses reasoning item from model output",
     )
 
+    # Deprecated fields that were moved to to_chat_dict() parameters.
+    # These fields are ignored but accepted for backward compatibility.
+    # REMOVE_AT: 1.12.0 - Remove this list and the _handle_deprecated_fields validator
+    _DEPRECATED_FIELDS: ClassVar[tuple[str, ...]] = (
+        "cache_enabled",
+        "vision_enabled",
+        "function_calling_enabled",
+        "force_string_serializer",
+        "send_reasoning_content",
+    )
+
+    model_config = ConfigDict(extra="ignore")
+
+    @model_validator(mode="before")
+    @classmethod
+    def _handle_deprecated_fields(cls, data: Any) -> Any:
+        """Handle deprecated fields by emitting warnings and removing them.
+
+        REMOVE_AT: 1.12.0 - Remove this validator along with _DEPRECATED_FIELDS
+        """
+        if not isinstance(data, dict):
+            return data
+
+        deprecated_found = [f for f in cls._DEPRECATED_FIELDS if f in data]
+        for field in deprecated_found:
+            warn_deprecated(
+                f"Message.{field}",
+                deprecated_in="1.9.1",
+                removed_in="1.12.0",
+                details=(
+                    f"The '{field}' field has been removed from Message. "
+                    "Pass it as a parameter to to_chat_dict() instead, or use "
+                    "LLM.format_messages_for_llm() which handles this automatically."
+                ),
+                stacklevel=4,  # Adjust for validator call depth
+            )
+            # Remove the deprecated field so Pydantic doesn't complain
+            del data[field]
+
+        return data
+
     @property
     def contains_image(self) -> bool:
         return any(isinstance(content, ImageContent) for content in self.content)
@@ -264,17 +287,32 @@ class Message(BaseModel):
             return [TextContent(text=v)]
         return v
 
-    def to_chat_dict(self) -> dict[str, Any]:
+    def to_chat_dict(
+        self,
+        *,
+        cache_enabled: bool,
+        vision_enabled: bool,
+        function_calling_enabled: bool,
+        force_string_serializer: bool,
+        send_reasoning_content: bool,
+    ) -> dict[str, Any]:
         """Serialize message for OpenAI Chat Completions.
 
+        Args:
+            cache_enabled: Whether prompt caching is active.
+            vision_enabled: Whether vision/image processing is enabled.
+            function_calling_enabled: Whether native function calling is enabled.
+            force_string_serializer: Force string serializer instead of list format.
+            send_reasoning_content: Whether to include reasoning_content in output.
+
         Chooses the appropriate content serializer and then injects threading keys:
         - Assistant tool call turn: role == "assistant" and self.tool_calls
         - Tool result turn: role == "tool" and self.tool_call_id (with name)
         """
-        if not self.force_string_serializer and (
-            self.cache_enabled or self.vision_enabled or self.function_calling_enabled
+        if not force_string_serializer and (
+            cache_enabled or vision_enabled or function_calling_enabled
         ):
-            message_dict = self._list_serializer()
+            message_dict = self._list_serializer(vision_enabled=vision_enabled)
         else:
             # some providers, like HF and Groq/llama, don't support a list here, but a
             # single string
@@ -294,7 +332,7 @@
             message_dict["name"] = self.name
 
         # Required for model like kimi-k2-thinking
-        if self.send_reasoning_content and self.reasoning_content:
+        if send_reasoning_content and self.reasoning_content:
             message_dict["reasoning_content"] = self.reasoning_content
 
         return message_dict
@@ -309,7 +347,7 @@
         # tool call keys are added in to_chat_dict to centralize behavior
         return message_dict
 
-    def _list_serializer(self) -> dict[str, Any]:
+    def _list_serializer(self, *, vision_enabled: bool) -> dict[str, Any]:
        content: list[dict[str, Any]] = []
        role_tool_with_prompt_caching = False
 
@@ -337,7 +375,7 @@
                 d.pop("cache_control", None)
 
             # Handle vision-enabled filtering for ImageContent
-            if isinstance(item, ImageContent) and self.vision_enabled:
+            if isinstance(item, ImageContent) and vision_enabled:
                 content.extend(item_dicts)
             elif not isinstance(item, ImageContent):
                 # Add non-image content (TextContent, etc.)
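
Migration sketch for code that called to_chat_dict() directly (the keyword names are exactly those added in this diff; the flag values are illustrative):

    msg = Message(role="user", content=[TextContent(text="hi")])

    # Before 1.10.0: flags were Message fields, set before serialization.
    # Now they are keyword-only parameters:
    chat_dict = msg.to_chat_dict(
        cache_enabled=False,
        vision_enabled=True,
        function_calling_enabled=True,
        force_string_serializer=False,
        send_reasoning_content=False,
    )

    # Passing the old fields to the constructor still parses, but each one
    # warns via _handle_deprecated_fields and is dropped:
    Message(role="user", content=[TextContent(text="hi")], vision_enabled=True)
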
openhands/sdk/llm/options/chat_options.py CHANGED
@@ -71,7 +71,8 @@ def select_chat_options(
     out.pop("temperature", None)
     out.pop("top_p", None)
 
-    # Mistral / Gemini safety
+    # REMOVE_AT: 1.15.0 - Remove this block along with LLM.safety_settings field
+    # Mistral / Gemini safety (deprecated)
     if llm.safety_settings:
         ml = llm.model.lower()
         if "mistral" in ml or "gemini" in ml:
openhands/sdk/mcp/client.py CHANGED
@@ -2,27 +2,53 @@
 
 import asyncio
 import inspect
-from collections.abc import Callable
-from typing import Any
+from collections.abc import Callable, Iterator
+from typing import TYPE_CHECKING, Any
 
 from fastmcp import Client as AsyncMCPClient
 
 from openhands.sdk.utils.async_executor import AsyncExecutor
 
 
+if TYPE_CHECKING:
+    from openhands.sdk.mcp.tool import MCPToolDefinition
+
+
 class MCPClient(AsyncMCPClient):
-    """
-    Behaves exactly like fastmcp.Client (same constructor & async API),
-    but owns a background event loop and offers:
+    """MCP client with sync helpers and lifecycle management.
+
+    Extends fastmcp.Client with:
     - call_async_from_sync(awaitable_or_fn, *args, timeout=None, **kwargs)
     - call_sync_from_async(fn, *args, **kwargs)  # await this from async code
+
+    After create_mcp_tools() populates it, use as a sync context manager:
+
+        with create_mcp_tools(config) as client:
+            for tool in client.tools:
+                ...  # use tool
+        # Connection automatically closed
+
+    Or manage lifecycle manually by calling sync_close() when done.
     """
 
     _executor: AsyncExecutor
+    _closed: bool
+    _tools: "list[MCPToolDefinition]"
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self._executor = AsyncExecutor()
+        self._closed = False
+        self._tools = []
+
+    @property
+    def tools(self) -> "list[MCPToolDefinition]":
+        """The MCP tools using this client connection (returns a copy)."""
+        return list(self._tools)
+
+    async def connect(self) -> None:
+        """Establish connection to the MCP server."""
+        await self.__aenter__()
 
     def call_async_from_sync(
         self,
@@ -56,8 +82,11 @@ class MCPClient(AsyncMCPClient):
         Synchronously close the MCP client and cleanup resources.
 
         This will attempt to call the async close() method if available,
-        then shutdown the background event loop.
+        then shutdown the background event loop. Safe to call multiple times.
         """
+        if self._closed:
+            return
+
         # Best-effort: try async close if parent provides it
         if hasattr(self, "close") and inspect.iscoroutinefunction(self.close):
             try:
@@ -67,6 +96,7 @@ class MCPClient(AsyncMCPClient):
 
         # Always cleanup the executor
         self._executor.close()
+        self._closed = True
 
     def __del__(self):
         """Cleanup on deletion."""
@@ -74,3 +104,20 @@ class MCPClient(AsyncMCPClient):
             self.sync_close()
         except Exception:
             pass  # Ignore cleanup errors during deletion
+
+    # Sync context manager support
+    def __enter__(self) -> "MCPClient":
+        return self
+
+    def __exit__(self, *args: object) -> None:
+        self.sync_close()
+
+    # Iteration support for tools
+    def __iter__(self) -> "Iterator[MCPToolDefinition]":
+        return iter(self._tools)
+
+    def __len__(self) -> int:
+        return len(self._tools)
+
+    def __getitem__(self, index: int) -> "MCPToolDefinition":
+        return self._tools[index]
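
A usage sketch for the new sync lifecycle; create_mcp_tools() and config come from the class docstring above, and the body is illustrative:

    client = create_mcp_tools(config)  # returns a populated MCPClient
    with client:
        print(f"{len(client)} tools")  # __len__
        first = client[0]              # __getitem__
        for tool in client:            # __iter__
            ...
    # __exit__ called sync_close(); further calls are no-ops

Note that __enter__ does not establish a connection; per this diff, connecting happens through the async connect() (or inside create_mcp_tools()), and the sync context manager only guarantees cleanup.
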
openhands/sdk/mcp/tool.py CHANGED
@@ -52,27 +52,30 @@ class MCPToolExecutor(ToolExecutor):
 
     @observe(name="MCPToolExecutor.call_tool", span_type="TOOL")
     async def call_tool(self, action: MCPToolAction) -> MCPToolObservation:
-        async with self.client:
-            assert self.client.is_connected(), "MCP client is not connected."
-            try:
-                logger.debug(
-                    f"Calling MCP tool {self.tool_name} "
-                    f"with args: {action.model_dump()}"
-                )
-                result: mcp.types.CallToolResult = await self.client.call_tool_mcp(
-                    name=self.tool_name, arguments=action.to_mcp_arguments()
-                )
-                return MCPToolObservation.from_call_tool_result(
-                    tool_name=self.tool_name, result=result
-                )
-            except Exception as e:
-                error_msg = f"Error calling MCP tool {self.tool_name}: {str(e)}"
-                logger.error(error_msg, exc_info=True)
-                return MCPToolObservation.from_text(
-                    text=error_msg,
-                    is_error=True,
-                    tool_name=self.tool_name,
-                )
+        """Execute the MCP tool call using the already-connected client."""
+        if not self.client.is_connected():
+            raise RuntimeError(
+                f"MCP client not connected for tool '{self.tool_name}'. "
+                "The connection may have been closed or failed to establish."
+            )
+        try:
+            logger.debug(
+                f"Calling MCP tool {self.tool_name} with args: {action.model_dump()}"
+            )
+            result: mcp.types.CallToolResult = await self.client.call_tool_mcp(
+                name=self.tool_name, arguments=action.to_mcp_arguments()
+            )
+            return MCPToolObservation.from_call_tool_result(
+                tool_name=self.tool_name, result=result
+            )
+        except Exception as e:
+            error_msg = f"Error calling MCP tool {self.tool_name}: {str(e)}"
+            logger.error(error_msg, exc_info=True)
+            return MCPToolObservation.from_text(
+                text=error_msg,
+                is_error=True,
+                tool_name=self.tool_name,
+            )
 
     def __call__(
         self,