openhands-sdk 1.7.3__py3-none-any.whl → 1.7.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -489,7 +489,23 @@ class RemoteConversation(BaseConversation):
         self._hook_processor = None
         self._cleanup_initiated = False

-        if conversation_id is None:
+        should_create = conversation_id is None
+        if conversation_id is not None:
+            # Try to attach to existing conversation
+            resp = _send_request(
+                self._client,
+                "GET",
+                f"/api/conversations/{conversation_id}",
+                acceptable_status_codes={404},
+            )
+            if resp.status_code == 404:
+                # Conversation doesn't exist, we'll create it
+                should_create = True
+            else:
+                # Conversation exists, use the provided ID
+                self._id = conversation_id
+
+        if should_create:
             # Import here to avoid circular imports
             from openhands.sdk.tool.registry import get_tool_module_qualnames

@@ -518,6 +534,9 @@ class RemoteConversation(BaseConversation):
             else:
                 threshold_config = stuck_detection_thresholds
             payload["stuck_detection_thresholds"] = threshold_config.model_dump()
+            # Include conversation_id if provided (for creating with specific ID)
+            if conversation_id is not None:
+                payload["conversation_id"] = str(conversation_id)
             resp = _send_request(
                 self._client, "POST", "/api/conversations", json=payload
             )
@@ -529,11 +548,6 @@ class RemoteConversation(BaseConversation):
                     "Invalid response from server: missing conversation id"
                 )
             self._id = uuid.UUID(cid)
-        else:
-            # Attach to existing
-            self._id = conversation_id
-            # Validate it exists
-            _send_request(self._client, "GET", f"/api/conversations/{self._id}")

         # Initialize the remote state
         self._state = RemoteState(self._client, str(self._id))
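
Taken together, the three hunks above change what happens when a caller passes an explicit conversation_id: the client now probes the server first and, on a 404, creates the conversation with that ID instead of only attaching. A minimal sketch of the same attach-or-create flow against the endpoints shown in the diff, using a bare httpx.Client (the base URL and helper name are illustrative assumptions, not the SDK's actual wiring):

    import uuid

    import httpx

    def attach_or_create(client: httpx.Client, conversation_id: uuid.UUID) -> uuid.UUID:
        # Probe for an existing conversation; a 404 means we have to create it.
        resp = client.get(f"/api/conversations/{conversation_id}")
        if resp.status_code != 404:
            resp.raise_for_status()
            return conversation_id  # attach to the existing conversation

        # Not found: create a new conversation, pinning the requested ID.
        payload = {"conversation_id": str(conversation_id)}  # other fields omitted
        resp = client.post("/api/conversations", json=payload)
        resp.raise_for_status()
        return uuid.UUID(resp.json()["id"])

    # client = httpx.Client(base_url="http://localhost:3000")  # hypothetical server URL
    # conversation_id = attach_or_create(client, uuid.uuid4())
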
@@ -719,12 +733,8 @@ class RemoteConversation(BaseConversation):

         if resp.status_code == 409:
             logger.info("Conversation is already running; skipping run trigger")
-            if blocking:
-                # Still wait for the existing run to complete
-                self._wait_for_run_completion(poll_interval, timeout)
-            return
-
-        logger.info(f"run() triggered successfully: {resp}")
+        else:
+            logger.info(f"run() triggered successfully: {resp}")

         if blocking:
             self._wait_for_run_completion(poll_interval, timeout)
@@ -741,7 +751,9 @@ class RemoteConversation(BaseConversation):
             timeout: Maximum time in seconds to wait.

         Raises:
-            ConversationRunError: If the wait times out.
+            ConversationRunError: If the run fails, the conversation disappears,
+                or the wait times out. Transient network errors, 429s, and 5xx
+                responses are retried until timeout.
         """
         start_time = time.monotonic()

@@ -757,56 +769,88 @@ class RemoteConversation(BaseConversation):
                 )

             try:
-                resp = _send_request(
-                    self._client,
-                    "GET",
-                    f"/api/conversations/{self._id}",
-                    timeout=30,
-                )
-                info = resp.json()
-                status = info.get("execution_status")
-
-                if status != ConversationExecutionStatus.RUNNING.value:
-                    if status == ConversationExecutionStatus.ERROR.value:
-                        detail = self._get_last_error_detail()
-                        raise ConversationRunError(
-                            self._id,
-                            RuntimeError(
-                                detail or "Remote conversation ended with error"
-                            ),
-                        )
-                    if status == ConversationExecutionStatus.STUCK.value:
-                        raise ConversationRunError(
-                            self._id,
-                            RuntimeError("Remote conversation got stuck"),
-                        )
+                status = self._poll_status_once()
+            except Exception as exc:
+                self._handle_poll_exception(exc)
+            else:
+                if self._handle_conversation_status(status):
                     logger.info(
-                        f"Run completed with status: {status} (elapsed: {elapsed:.1f}s)"
+                        "Run completed with status: %s (elapsed: %.1fs)",
+                        status,
+                        elapsed,
                     )
                     return

-            except Exception as e:
-                # Log but continue polling - transient network errors shouldn't
-                # stop us from waiting for the run to complete
-                logger.warning(f"Error polling status (will retry): {e}")
-
             time.sleep(poll_interval)

+    def _poll_status_once(self) -> str | None:
+        """Fetch the current execution status from the remote conversation."""
+        resp = _send_request(
+            self._client,
+            "GET",
+            f"/api/conversations/{self._id}",
+            timeout=30,
+        )
+        info = resp.json()
+        return info.get("execution_status")
+
+    def _handle_conversation_status(self, status: str | None) -> bool:
+        """Handle non-running statuses; return True if the run is complete."""
+        if status == ConversationExecutionStatus.RUNNING.value:
+            return False
+        if status == ConversationExecutionStatus.ERROR.value:
+            detail = self._get_last_error_detail()
+            raise ConversationRunError(
+                self._id,
+                RuntimeError(detail or "Remote conversation ended with error"),
+            )
+        if status == ConversationExecutionStatus.STUCK.value:
+            raise ConversationRunError(
+                self._id,
+                RuntimeError("Remote conversation got stuck"),
+            )
+        return True
+
+    def _handle_poll_exception(self, exc: Exception) -> None:
+        """Classify polling exceptions into retryable vs terminal failures."""
+        if isinstance(exc, httpx.HTTPStatusError):
+            status_code = exc.response.status_code
+            reason = exc.response.reason_phrase
+            if status_code == 404:
+                raise ConversationRunError(
+                    self._id,
+                    RuntimeError(
+                        "Remote conversation not found (404). "
+                        "The runtime may have been deleted."
+                    ),
+                ) from exc
+            if 400 <= status_code < 500 and status_code != 429:
+                raise ConversationRunError(
+                    self._id,
+                    RuntimeError(f"Polling failed with HTTP {status_code} {reason}"),
+                ) from exc
+            logger.warning(
+                "Error polling status (will retry): HTTP %d %s",
+                status_code,
+                reason,
+            )
+            return
+        if isinstance(exc, httpx.RequestError):
+            logger.warning(f"Error polling status (will retry): {exc}")
+            return
+        raise ConversationRunError(self._id, exc) from exc
+
     def _get_last_error_detail(self) -> str | None:
         """Return the most recent ConversationErrorEvent detail, if available."""
-        try:
-            events = self._state.events
-            for idx in range(len(events) - 1, -1, -1):
-                event = events[idx]
-                if isinstance(event, ConversationErrorEvent):
-                    detail = event.detail.strip()
-                    code = event.code.strip()
-                    if detail and code:
-                        return f"{code}: {detail}"
-                    return detail or code or None
-        except Exception as exc:
-            logger.debug("Failed to read conversation error detail: %s", exc)
-        return None
+        events = self._state.events
+        for idx in range(len(events) - 1, -1, -1):
+            event = events[idx]
+            if isinstance(event, ConversationErrorEvent):
+                detail = event.detail.strip()
+                code = event.code.strip()
+                if detail and code:
+                    return f"{code}: {detail}"
+                return detail or code or None

     def set_confirmation_policy(self, policy: ConfirmationPolicyBase) -> None:
         payload = {"policy": policy.model_dump()}
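
The refactored polling loop also tightens failure handling: _handle_poll_exception treats 404 and other non-429 4xx responses as terminal, while 429, 5xx, and transport errors are logged and retried until the overall timeout, matching the updated Raises docstring. A standalone sketch of that classification rule, assuming only httpx (the helper name and boolean return convention are illustrative, not SDK API):

    import httpx

    def is_retryable(exc: Exception) -> bool:
        """Return True if a polling attempt should be retried, False if it is terminal."""
        if isinstance(exc, httpx.HTTPStatusError):
            code = exc.response.status_code
            # Rate limiting (429) and server-side errors (5xx) are transient.
            return code == 429 or code >= 500
        if isinstance(exc, httpx.RequestError):
            # Timeouts, connection resets, DNS failures, etc. are transient.
            return True
        # Anything else (bad JSON, programming errors) should surface immediately.
        return False
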
@@ -133,7 +133,6 @@ class ConversationState(OpenHandsModel):
         default_factory=FIFOLock
     )  # FIFO lock for thread safety

-    # ===== Public "events" facade (Sequence[Event]) =====
     @property
     def events(self) -> EventLog:
         return self._events
@@ -200,12 +199,17 @@ class ConversationState(OpenHandsModel):
                 f"but persisted state has {state.id}"
             )

-        # Reconcile agent config with deserialized one
-        resolved = agent.resolve_diff_from_deserialized(state.agent)
-
-        # Attach runtime handles and commit reconciled agent (may autosave)
+        # Attach event log early so we can read history
         state._fs = file_store
         state._events = EventLog(file_store, dir_path=EVENTS_DIR)
+
+        # Reconcile agent config with deserialized one
+        # Pass event log so tool usage can be checked on-the-fly if needed
+        resolved = agent.resolve_diff_from_deserialized(
+            state.agent, events=state._events
+        )
+
+        # Commit reconciled agent (may autosave)
         state._autosave_enabled = True
         state.agent = resolved

openhands/sdk/llm/llm.py CHANGED
@@ -158,7 +158,6 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin):
     top_p: float | None = Field(default=1.0, ge=0, le=1)
     top_k: float | None = Field(default=None, ge=0)

-    custom_llm_provider: str | None = Field(default=None)
     max_input_tokens: int | None = Field(
         default=None,
         ge=1,
@@ -342,7 +341,7 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin):
     _telemetry: Telemetry | None = PrivateAttr(default=None)

     model_config: ClassVar[ConfigDict] = ConfigDict(
-        extra="forbid", arbitrary_types_allowed=True
+        extra="ignore", arbitrary_types_allowed=True
     )

     # =========================================================================
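
These two llm.py hunks go together: custom_llm_provider is removed from the model, and extra="forbid" is relaxed to extra="ignore", so serialized configs that still carry now-unknown keys validate cleanly instead of raising. A minimal Pydantic sketch of the difference (LLMish is an illustrative stand-in, not the SDK class):

    from pydantic import BaseModel, ConfigDict

    class LLMish(BaseModel):
        # extra="ignore" silently drops unknown keys; extra="forbid" would raise instead.
        model_config = ConfigDict(extra="ignore")

        model: str
        top_p: float | None = 1.0

    # An example payload persisted by an older version that still carries the removed field.
    old_payload = {"model": "gpt-5.1", "top_p": 0.9, "custom_llm_provider": "litellm"}

    llm = LLMish.model_validate(old_payload)  # no ValidationError with extra="ignore"
    print(llm.model_dump())                   # {'model': 'gpt-5.1', 'top_p': 0.9}
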
@@ -51,9 +51,12 @@ def select_chat_options(
     # Extended thinking models
     if get_features(llm.model).supports_extended_thinking:
         if llm.extended_thinking_budget:
+            # Anthropic throws errors if thinking budget equals or exceeds max output
+            # tokens -- force the thinking budget lower if there's a conflict
+            budget_tokens = min(llm.extended_thinking_budget, llm.max_output_tokens - 1)
             out["thinking"] = {
                 "type": "enabled",
-                "budget_tokens": llm.extended_thinking_budget,
+                "budget_tokens": budget_tokens,
             }
             # Enable interleaved thinking
     # Merge default header with any user-provided headers; user wins on conflict
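
The clamp keeps the thinking budget strictly below the output-token cap, since Anthropic rejects a budget that equals or exceeds max output tokens. A quick illustration of the arithmetic with made-up numbers:

    extended_thinking_budget = 32_000  # example values, not SDK defaults
    max_output_tokens = 16_384

    # Cap the thinking budget one token below the output limit to avoid the API error.
    budget_tokens = min(extended_thinking_budget, max_output_tokens - 1)
    print(budget_tokens)  # 16383
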
@@ -50,7 +50,7 @@ VERIFIED_OPENHANDS_MODELS = [
     "gpt-5.1-codex",
     "gpt-5.1",
     "gemini-3-pro-preview",
-    "deekseek-chat",
+    "deepseek-chat",
     "kimi-k2-thinking",
     "devstral-medium-2512",
     "devstral-2512",
openhands/sdk/mcp/tool.py CHANGED
@@ -186,7 +186,9 @@ class MCPToolDefinition(ToolDefinition[MCPToolAction, MCPToolObservation]):
         # Use exclude_none to avoid injecting nulls back to the call
         # Exclude DiscriminatedUnionMixin fields (e.g., 'kind') as they're
         # internal to OpenHands and not part of the MCP tool schema
-        exclude_fields = set(DiscriminatedUnionMixin.model_fields.keys())
+        exclude_fields = set(DiscriminatedUnionMixin.model_fields.keys()) | set(
+            DiscriminatedUnionMixin.model_computed_fields.keys()
+        )
         sanitized = validated.model_dump(exclude_none=True, exclude=exclude_fields)
         return MCPToolAction(data=sanitized)

@@ -121,9 +121,12 @@ class Schema(DiscriminatedUnionMixin):
         # so it is fully compatible with MCP tool schema
         result = _process_schema_node(full_schema, full_schema.get("$defs", {}))

-        # Remove 'kind' from properties if present (discriminator field, not for LLM)
-        EXCLUDE_FIELDS = DiscriminatedUnionMixin.model_fields.keys()
-        for f in EXCLUDE_FIELDS:
+        # Remove discriminator fields from properties (not for LLM)
+        # Need to exclude both regular fields and computed fields (like 'kind')
+        exclude_fields = set(DiscriminatedUnionMixin.model_fields.keys()) | set(
+            DiscriminatedUnionMixin.model_computed_fields.keys()
+        )
+        for f in exclude_fields:
             if "properties" in result and f in result["properties"]:
                 result["properties"].pop(f)
             # Also remove from required if present
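
Both MCP hunks widen the exclusion set from declared fields to declared plus computed fields, because a discriminator such as 'kind' implemented as a computed property never appears in model_fields and would otherwise leak into dumps and generated schemas. A small Pydantic sketch of the distinction (Tagged and Payload are illustrative models, not the SDK's classes):

    from pydantic import BaseModel, computed_field

    class Tagged(BaseModel):
        """Base class whose discriminator is a computed property, not a declared field."""

        @computed_field  # included in dumps and schemas, but absent from model_fields
        @property
        def kind(self) -> str:
            return type(self).__name__

    class Payload(Tagged):
        command: str

    print(set(Tagged.model_fields))           # set() -- 'kind' is not a regular field
    print(set(Tagged.model_computed_fields))  # {'kind'}

    exclude = set(Tagged.model_fields) | set(Tagged.model_computed_fields)
    print(Payload(command="ls").model_dump(exclude=exclude))  # {'command': 'ls'}
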