openhands-sdk 1.5.1__py3-none-any.whl → 1.6.0__py3-none-any.whl
This diff shows the changes between package versions as they were publicly released to their registry. It is provided for informational purposes only.
- openhands/sdk/agent/base.py +32 -6
- openhands/sdk/agent/prompts/model_specific/anthropic_claude.j2 +3 -0
- openhands/sdk/agent/prompts/model_specific/google_gemini.j2 +1 -0
- openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5-codex.j2 +3 -0
- openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5.j2 +3 -0
- openhands/sdk/agent/prompts/self_documentation.j2 +15 -0
- openhands/sdk/agent/prompts/system_prompt.j2 +28 -0
- openhands/sdk/conversation/state.py +1 -0
- openhands/sdk/llm/llm.py +3 -2
- openhands/sdk/llm/message.py +2 -1
- openhands/sdk/llm/utils/model_prompt_spec.py +98 -0
- openhands/sdk/tool/tool.py +2 -2
- openhands/sdk/utils/paging.py +63 -0
- {openhands_sdk-1.5.1.dist-info → openhands_sdk-1.6.0.dist-info}/METADATA +2 -2
- {openhands_sdk-1.5.1.dist-info → openhands_sdk-1.6.0.dist-info}/RECORD +17 -10
- {openhands_sdk-1.5.1.dist-info → openhands_sdk-1.6.0.dist-info}/WHEEL +0 -0
- {openhands_sdk-1.5.1.dist-info → openhands_sdk-1.6.0.dist-info}/top_level.txt +0 -0
openhands/sdk/agent/base.py
CHANGED
@@ -3,6 +3,7 @@ import re
 import sys
 from abc import ABC, abstractmethod
 from collections.abc import Generator, Iterable
+from concurrent.futures import ThreadPoolExecutor
 from typing import TYPE_CHECKING, Any

 from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
@@ -11,6 +12,7 @@ from openhands.sdk.context.agent_context import AgentContext
 from openhands.sdk.context.condenser import CondenserBase, LLMSummarizingCondenser
 from openhands.sdk.context.prompts.prompt import render_template
 from openhands.sdk.llm import LLM
+from openhands.sdk.llm.utils.model_prompt_spec import get_model_prompt_spec
 from openhands.sdk.logger import get_logger
 from openhands.sdk.mcp import create_mcp_tools
 from openhands.sdk.tool import BUILT_IN_TOOLS, Tool, ToolDefinition, resolve_tool
@@ -163,6 +165,18 @@ class AgentBase(DiscriminatedUnionMixin, ABC):
     def system_message(self) -> str:
         """Compute system message on-demand to maintain statelessness."""
         template_kwargs = dict(self.system_prompt_kwargs)
+        template_kwargs.setdefault("model_name", self.llm.model)
+        if (
+            "model_family" not in template_kwargs
+            or "model_variant" not in template_kwargs
+        ):
+            spec = get_model_prompt_spec(
+                self.llm.model, getattr(self.llm, "model_canonical_name", None)
+            )
+            if "model_family" not in template_kwargs and spec.family:
+                template_kwargs["model_family"] = spec.family
+            if "model_variant" not in template_kwargs and spec.variant:
+                template_kwargs["model_variant"] = spec.variant
         system_message = render_template(
             prompt_dir=self.prompt_dir,
             template_name=self.system_prompt_filename,
@@ -196,13 +210,25 @@
             return

         tools: list[ToolDefinition] = []
-        for tool_spec in self.tools:
-            tools.extend(resolve_tool(tool_spec, state))

-        #
-
-
-
+        # Use ThreadPoolExecutor to parallelize tool resolution
+        with ThreadPoolExecutor(max_workers=4) as executor:
+            futures = []
+
+            # Submit tool resolution tasks
+            for tool_spec in self.tools:
+                future = executor.submit(resolve_tool, tool_spec, state)
+                futures.append(future)
+
+            # Submit MCP tools creation if configured
+            if self.mcp_config:
+                future = executor.submit(create_mcp_tools, self.mcp_config, 30)
+                futures.append(future)
+
+            # Collect results as they complete
+            for future in futures:
+                result = future.result()
+                tools.extend(result)

         logger.info(
             f"Loaded {len(tools)} tools from spec: {[tool.name for tool in tools]}"
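For reference, here is a standalone sketch of the fan-out/fan-in pattern the new tool-loading code uses (the names below are illustrative, not the SDK API). Futures are awaited in submission order, so the resulting tool list stays deterministic even though resolution runs concurrently, and any per-tool exception surfaces at future.result().

from concurrent.futures import ThreadPoolExecutor
import time

def resolve(name: str) -> list[str]:
    time.sleep(0.1)  # stand-in for slow I/O such as MCP server discovery
    return [f"{name}-tool"]

specs = ["bash", "file_editor", "browser"]
tools: list[str] = []
with ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(resolve, spec) for spec in specs]
    for future in futures:
        tools.extend(future.result())  # collected in submission order
print(tools)  # ['bash-tool', 'file_editor-tool', 'browser-tool']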
openhands/sdk/agent/prompts/model_specific/anthropic_claude.j2
ADDED
@@ -0,0 +1,3 @@
+* Try to follow the instructions exactly as given - don't make extra or fewer actions if not asked.
+* Avoid unnecessary defensive programming; do not add redundant fallbacks or default values — fail fast instead of masking misconfigurations.
+* When backward compatibility expectations are unclear, confirm with the user before making changes that could break existing behavior.
openhands/sdk/agent/prompts/model_specific/google_gemini.j2
ADDED
@@ -0,0 +1 @@
+* Avoid being too proactive. Fulfill the user's request thoroughly: if they ask questions/investigations, answer them; if they ask for implementations, provide them. But do not take extra steps beyond what is requested.
openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5-codex.j2
ADDED
@@ -0,0 +1,3 @@
+* Stream your thinking and responses while staying concise; surface key assumptions and environment prerequisites explicitly.
+* ALWAYS send a brief preamble to the user explaining what you're about to do before each tool call, using 8 - 12 words, with a friendly and curious tone.
+* You have access to external resources and should actively use available tools to try accessing them first, rather than claiming you can’t access something without making an attempt.
openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5.j2
ADDED
@@ -0,0 +1,3 @@
+* Stream your thinking and responses while staying concise; surface key assumptions and environment prerequisites explicitly.
+* ALWAYS send a brief preamble to the user explaining what you're about to do before each tool call, using 8 - 12 words, with a friendly and curious tone.
+* You have access to external resources and should actively use available tools to try accessing them first, rather than claiming you can’t access something without making an attempt.
openhands/sdk/agent/prompts/self_documentation.j2
ADDED
@@ -0,0 +1,15 @@
+When the user directly asks about any of the following:
+- OpenHands capabilities (e.g., "can OpenHands do...", "does OpenHands have...")
+- what you're able to do in second person (e.g., "are you able...", "can you...")
+- how to use a specific OpenHands feature or product
+- how to use the OpenHands SDK, CLI, GUI, or other OpenHands products
+
+Get accurate information from the official OpenHands documentation at <https://docs.openhands.dev/>. The documentation includes:
+
+**OpenHands SDK** (`/sdk/*`): Python library for building AI agents; Getting Started, Architecture, Guides (agent, llm, conversation, tools), API Reference
+**OpenHands CLI** (`/openhands/usage/run-openhands/cli-mode`): Command-line interface
+**OpenHands GUI** (`/openhands/usage/run-openhands/local-setup`): Local GUI and REST API
+**OpenHands Cloud** (`/openhands/usage/run-openhands/cloud`): Hosted solution with integrations
+**OpenHands Enterprise**: Self-hosted deployment with extended support
+
+Always provide links to the relevant documentation pages for users who want to learn more.
openhands/sdk/agent/prompts/system_prompt.j2
CHANGED
@@ -5,6 +5,13 @@ You are OpenHands agent, a helpful AI assistant that can interact with a computer
 * If the user asks a question, like "why is X happening", don't try to fix the problem. Just give an answer to the question.
 </ROLE>

+<MEMORY>
+* Use `.openhands/skills/repo.md` under the repository root as your persistent memory for repository-specific knowledge and context.
+* Add important insights, patterns, and learnings to this file to improve future task performance.
+* This repository skill is automatically loaded for every conversation and helps maintain context across sessions.
+* For more information about skills, see: https://docs.openhands.dev/overview/skills
+</MEMORY>
+
 <EFFICIENCY>
 * Each action you take is somewhat expensive. Wherever possible, combine multiple actions into a single action, e.g. combine multiple bash commands into one, using sed and grep to edit/view multiple files at once.
 * When exploring the codebase, use efficient tools like find, grep, and git commands with appropriate filters to minimize unnecessary operations.
@@ -62,6 +69,10 @@ You are OpenHands agent, a helpful AI assistant that can interact with a computer
 5. VERIFICATION: If the environment is set up to run tests, test your implementation thoroughly, including edge cases. If the environment is not set up to run tests, consult with the user first before investing time to run tests.
 </PROBLEM_SOLVING_WORKFLOW>

+<SELF_DOCUMENTATION>
+{% include 'self_documentation.j2' %}
+</SELF_DOCUMENTATION>
+
 <SECURITY>
 {% include 'security_policy.j2' %}
 </SECURITY>
@@ -102,3 +113,20 @@ You are OpenHands agent, a helpful AI assistant that can interact with a computer
 - Prefer using `ps aux` to find the exact process ID (PID) first, then kill that specific PID
 - When possible, use more targeted approaches like finding the PID from a pidfile or using application-specific shutdown commands
 </PROCESS_MANAGEMENT>
+
+{%- set _imp -%}
+{%- if model_family -%}
+{%- include "model_specific/" ~ model_family ~ ".j2" ignore missing -%}
+{%- if model_variant -%}
+{%- include "model_specific/" ~ model_family ~ "/" ~ model_variant ~ ".j2" ignore missing -%}
+{%- endif -%}
+{%- endif -%}
+{%- endset -%}
+
+{%- set _imp_trimmed = _imp | trim -%}
+{%- if _imp_trimmed %}
+
+<IMPORTANT>
+{{ _imp_trimmed }}
+</IMPORTANT>
+{%- endif %}
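A minimal rendering sketch of the include block above (the loader path and kwargs are illustrative, not the SDK's render_template call): with model_family="openai_gpt" and model_variant="gpt-5", Jinja tries model_specific/openai_gpt.j2 (no such family-level file ships in this release, so "ignore missing" skips it) and then model_specific/openai_gpt/gpt-5.j2, wrapping any non-empty result in the <IMPORTANT> section.

from jinja2 import Environment, FileSystemLoader

# Point the loader at the packaged prompts directory (path shown for the source tree).
env = Environment(loader=FileSystemLoader("openhands/sdk/agent/prompts"))
template = env.get_template("system_prompt.j2")
print(template.render(model_family="openai_gpt", model_variant="gpt-5"))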
openhands/sdk/conversation/state.py
CHANGED
@@ -42,6 +42,7 @@ class ConversationExecutionStatus(str, Enum):
     FINISHED = "finished"  # Conversation has completed the current task
     ERROR = "error"  # Conversation encountered an error (optional for future use)
     STUCK = "stuck"  # Conversation is stuck in a loop or unable to proceed
+    DELETING = "deleting"  # Conversation is in the process of being deleted

 
 class ConversationState(OpenHandsModel):
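Since ConversationExecutionStatus is a str enum, the new member round-trips through serialization like the existing ones; a quick sanity check:

from openhands.sdk.conversation.state import ConversationExecutionStatus

assert ConversationExecutionStatus.DELETING.value == "deleting"
assert ConversationExecutionStatus("deleting") is ConversationExecutionStatus.DELETING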
openhands/sdk/llm/llm.py
CHANGED
@@ -247,10 +247,11 @@ class LLM(BaseModel, RetryMixin, NonNativeToolCallingMixin):
             "like HuggingFace and Groq."
         ),
     )
-    reasoning_effort: Literal["low", "medium", "high", "none"] | None = Field(
+    reasoning_effort: Literal["low", "medium", "high", "xhigh", "none"] | None = Field(
         default="high",
         description="The effort to put into reasoning. "
-        "This is a string that can be one of 'low', 'medium', 'high',
+        "This is a string that can be one of 'low', 'medium', 'high', 'xhigh', "
+        "or 'none'. "
         "Can apply to all reasoning models.",
     )
     reasoning_summary: Literal["auto", "concise", "detailed"] | None = Field(
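The effect of widening the Literal is shown by this toy stand-in (not the SDK's LLM model; the real validation happens on the pydantic field above): "xhigh" is now accepted, while unknown values are still rejected, so callers passing reasoning_effort="xhigh" against 1.5.1 would have failed validation.

from typing import Literal

from pydantic import BaseModel, Field, ValidationError


class ReasoningConfig(BaseModel):  # toy stand-in, not openhands.sdk.llm.LLM
    reasoning_effort: Literal["low", "medium", "high", "xhigh", "none"] | None = Field(
        default="high"
    )


print(ReasoningConfig(reasoning_effort="xhigh").reasoning_effort)  # xhigh
try:
    ReasoningConfig(reasoning_effort="extreme")
except ValidationError as exc:
    print("rejected:", exc.error_count(), "error")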
openhands/sdk/llm/message.py
CHANGED
@@ -169,11 +169,12 @@ class TextContent(BaseContent):
     model_config: ClassVar[ConfigDict] = ConfigDict(
         extra="forbid", populate_by_name=True
     )
+    enable_truncation: bool = True

     def to_llm_dict(self) -> list[dict[str, str | dict[str, str]]]:
         """Convert to LLM API format."""
         text = self.text
-        if len(text) > DEFAULT_TEXT_CONTENT_LIMIT:
+        if self.enable_truncation and len(text) > DEFAULT_TEXT_CONTENT_LIMIT:
             logger.warning(
                 f"TextContent text length ({len(text)}) exceeds limit "
                 f"({DEFAULT_TEXT_CONTENT_LIMIT}), truncating"
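A hedged usage sketch, assuming TextContent is constructed with a text= keyword as elsewhere in the SDK: the new field lets a caller opt a single content block out of the DEFAULT_TEXT_CONTENT_LIMIT cap.

from openhands.sdk.llm.message import TextContent

content = TextContent(text="x" * 1_000_000, enable_truncation=False)
payload = content.to_llm_dict()  # full text forwarded, no truncation warning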
openhands/sdk/llm/utils/model_prompt_spec.py
ADDED
@@ -0,0 +1,98 @@
+"""Utilities for detecting model families and variants.
+
+These helpers allow prompts and other systems to tailor behavior for specific
+LLM providers while keeping naming heuristics centralized.
+"""
+
+from __future__ import annotations
+
+from pydantic import BaseModel, ConfigDict
+
+
+class ModelPromptSpec(BaseModel):
+    """Detected prompt metadata for a given model configuration."""
+
+    model_config = ConfigDict(frozen=True)
+
+    family: str | None = None
+    variant: str | None = None
+
+
+_MODEL_FAMILY_PATTERNS: dict[str, tuple[str, ...]] = {
+    "openai_gpt": (
+        "gpt-",
+        "o1",
+        "o3",
+        "o4",
+    ),
+    "anthropic_claude": ("claude",),
+    "google_gemini": ("gemini",),
+    "meta_llama": ("llama",),
+    "mistral": ("mistral",),
+    "deepseek": ("deepseek",),
+    "alibaba_qwen": ("qwen",),
+}
+
+# Ordered heuristics to pick the most specific variant available for a family.
+_MODEL_VARIANT_PATTERNS: dict[str, tuple[tuple[str, tuple[str, ...]], ...]] = {
+    "openai_gpt": (
+        ("gpt-5-codex", ("gpt-5-codex", "gpt-5.1-codex")),
+        ("gpt-5", ("gpt-5", "gpt-5.1")),
+    ),
+}
+
+
+def _normalize(name: str | None) -> str:
+    return (name or "").strip().lower()
+
+
+def _match_family(model_name: str) -> str | None:
+    normalized = _normalize(model_name)
+    if not normalized:
+        return None
+
+    for family, patterns in _MODEL_FAMILY_PATTERNS.items():
+        if any(pattern in normalized for pattern in patterns):
+            return family
+    return None
+
+
+def _match_variant(
+    family: str,
+    model_name: str,
+    canonical_name: str | None = None,
+) -> str | None:
+    patterns = _MODEL_VARIANT_PATTERNS.get(family)
+    if not patterns:
+        return None
+
+    # Choose canonical_name if available, otherwise fall back to model_name
+    candidate = _normalize(canonical_name) or _normalize(model_name)
+    if not candidate:
+        return None
+
+    for variant, substrings in patterns:
+        if any(sub in candidate for sub in substrings):
+            return variant
+
+    return None
+
+
+def get_model_prompt_spec(
+    model_name: str,
+    canonical_name: str | None = None,
+) -> ModelPromptSpec:
+    """Return family and variant prompt metadata for the given identifiers."""
+
+    family = _match_family(model_name)
+    if family is None and canonical_name:
+        family = _match_family(canonical_name)
+
+    variant = None
+    if family is not None:
+        variant = _match_variant(family, model_name, canonical_name)
+
+    return ModelPromptSpec(family=family, variant=variant)
+
+
+__all__ = ["ModelPromptSpec", "get_model_prompt_spec"]
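Usage sketch for the helper above; the results follow directly from the pattern tables in this module (the more specific gpt-5-codex variant is checked before gpt-5).

from openhands.sdk.llm.utils.model_prompt_spec import get_model_prompt_spec

print(get_model_prompt_spec("openrouter/gpt-5.1-codex"))
# family='openai_gpt' variant='gpt-5-codex'
print(get_model_prompt_spec("claude-sonnet-4-20250514"))
# family='anthropic_claude' variant=None
print(get_model_prompt_spec("unknown-model"))
# family=None variant=None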
openhands/sdk/tool/tool.py
CHANGED
@@ -364,7 +364,7 @@ class ToolDefinition[ActionT, ObservationT](DiscriminatedUnionMixin, ABC):
         action_type: type[Schema] | None = None,
     ) -> dict[str, Any]:
         action_type = action_type or self.action_type
-        action_type_with_risk =
+        action_type_with_risk = create_action_type_with_risk(action_type)

         add_security_risk_prediction = add_security_risk_prediction and (
             self.annotations is None or (not self.annotations.readOnlyHint)
@@ -460,7 +460,7 @@ class ToolDefinition[ActionT, ObservationT](DiscriminatedUnionMixin, ABC):
         raise ValueError(error_msg)


-def
+def create_action_type_with_risk(action_type: type[Schema]) -> type[Schema]:
    action_type_with_risk = _action_types_with_risk.get(action_type)
    if action_type_with_risk:
        return action_type_with_risk
openhands/sdk/utils/paging.py
ADDED
@@ -0,0 +1,63 @@
+"""Pagination utilities for iterating over paginated search results."""
+
+from collections.abc import AsyncGenerator, Awaitable, Callable
+from typing import Any, Protocol
+
+
+class PageProtocol[T](Protocol):
+    """Protocol for page objects returned by search functions.
+
+    All page objects should have:
+    - items: A list of items of type T
+    - next_page_id: Optional string for pagination
+    """
+
+    items: list[T]
+    next_page_id: str | None
+
+
+async def page_iterator[T](
+    search_func: Callable[..., Awaitable[PageProtocol[T]]],
+    *args: Any,
+    **kwargs: Any,
+) -> AsyncGenerator[T, None]:
+    """
+    Iterate over items from paginated search results.
+
+    This utility function handles pagination automatically by calling the search
+    function repeatedly with updated page_id parameters until all pages are
+    exhausted.
+
+    Args:
+        search_func: An async function that returns a PageProtocol[T] object
+            with 'items' and 'next_page_id' attributes
+        *args: Positional arguments to pass to the search function
+        **kwargs: Keyword arguments to pass to the search function
+
+    Yields:
+        Individual items of type T from each page
+
+    Example:
+        async for event in page_iterator(event_service.search_events, limit=50):
+            await send_event(event, websocket)
+
+        async for conversation in page_iterator(
+            conversation_service.search_conversations,
+            execution_status=ConversationExecutionStatus.RUNNING
+        ):
+            print(conversation.title)
+    """
+    page_id = kwargs.pop("page_id", None)
+
+    while True:
+        # Call the search function with current page_id
+        page = await search_func(*args, page_id=page_id, **kwargs)
+
+        # Yield each item from the current page
+        for item in page.items:
+            yield item
+
+        # Check if there are more pages
+        page_id = page.next_page_id
+        if not page_id:
+            break
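Self-contained usage sketch with a fake paginated API (the names below are illustrative, not an SDK service), showing how page_iterator walks page_id chains until exhaustion.

import asyncio
from dataclasses import dataclass

from openhands.sdk.utils.paging import page_iterator


@dataclass
class Page:
    items: list[int]
    next_page_id: str | None


async def fake_search(*, page_id: str | None = None, limit: int = 2) -> Page:
    # Two pages: the first points at "p2", the second terminates the loop.
    data = {None: Page([1, 2], "p2"), "p2": Page([3], None)}
    return data[page_id]


async def main() -> None:
    collected = [item async for item in page_iterator(fake_search, limit=2)]
    print(collected)  # [1, 2, 3]


asyncio.run(main())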
{openhands_sdk-1.5.1.dist-info → openhands_sdk-1.6.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openhands-sdk
-Version: 1.5.1
+Version: 1.6.0
 Summary: OpenHands SDK - Core functionality for building AI agents
 Requires-Python: >=3.12
 Requires-Dist: deprecation>=2.1.0
@@ -12,6 +12,6 @@ Requires-Dist: python-frontmatter>=1.1.0
 Requires-Dist: python-json-logger>=3.3.0
 Requires-Dist: tenacity>=9.1.2
 Requires-Dist: websockets>=12
-Requires-Dist: lmnr>=0.7.
+Requires-Dist: lmnr>=0.7.24
 Provides-Extra: boto3
 Requires-Dist: boto3>=1.35.0; extra == "boto3"
{openhands_sdk-1.5.1.dist-info → openhands_sdk-1.6.0.dist-info}/RECORD
CHANGED
@@ -2,17 +2,22 @@ openhands/sdk/__init__.py,sha256=SsB5acHhWvF6e3FlbR72PzZHH9ByJiXc7vnwxBPqmhw,237
 openhands/sdk/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openhands/sdk/agent/__init__.py,sha256=yOn1ZCgTTq2VJlTzKDSzmWVPli1siBzqV89vlEHCwOg,137
 openhands/sdk/agent/agent.py,sha256=OCR7JJytiiTHLuFwU3XjMtJx11Y2Io-DamdbMa2mf60,23760
-openhands/sdk/agent/base.py,sha256=
+openhands/sdk/agent/base.py,sha256=1Hyl1cwWeTvXbasPA8_aScJLQAFID0lfax8TY2XOrlk,17301
 openhands/sdk/agent/utils.py,sha256=alYsAQ611XQ_94ogJacYD22BLbgSJjzd3Xex_b9KuC4,7418
 openhands/sdk/agent/prompts/in_context_learning_example.j2,sha256=MGB0dPUlh6pwLoR_dBK-M3e5dtETX6C6WNjPcPixZmU,5512
 openhands/sdk/agent/prompts/in_context_learning_example_suffix.j2,sha256=k3Zwnd7Iq7kL4lo307RDuu1mxWXn6pSLsEdvKEXN3BU,164
 openhands/sdk/agent/prompts/security_policy.j2,sha256=K56d2aaZ88DI-y2DsMSDaiZRTTnkkzuBLjbzXfKHGA8,993
 openhands/sdk/agent/prompts/security_risk_assessment.j2,sha256=7o1tk6MIQpVD7sAES-sBhw4ckYLGQydzYnjjNitP5iY,1196
-openhands/sdk/agent/prompts/
+openhands/sdk/agent/prompts/self_documentation.j2,sha256=Q0nLryVYnGg8Qk056-3tW2WJ2rwBijC5iqJByEdGAT0,1063
+openhands/sdk/agent/prompts/system_prompt.j2,sha256=Hox3uOYhAToFl-16CBevKR_D2qu0SPlAg-KysH6VEqc,9067
 openhands/sdk/agent/prompts/system_prompt_interactive.j2,sha256=AW3rGuqu82BqbS1XMXVO4Fp-Apa8DPYZV3_nQYkzVtM,1388
 openhands/sdk/agent/prompts/system_prompt_long_horizon.j2,sha256=_oOHRIer_FSuRrBOSOPpe5Ueo9KgSTba5SPoHHpghCI,2995
 openhands/sdk/agent/prompts/system_prompt_planning.j2,sha256=wh01KX7yzeUSn5PtG3Ij0keRM6vmQuZZM6I0Fod4DK4,2934
 openhands/sdk/agent/prompts/system_prompt_tech_philosophy.j2,sha256=Yq9H7hHen2-tNsfBq9RlAWpyWsVRpjHhzmziZv8JHs8,5005
+openhands/sdk/agent/prompts/model_specific/anthropic_claude.j2,sha256=Cf4vXqlwoh94hOsLqcuIeMailgkrbGLF9vv0wHt4eEM,379
+openhands/sdk/agent/prompts/model_specific/google_gemini.j2,sha256=DvGy_9yuqeyKJ8GDF_qVpQz4ri-rSCRl1awqPE1ImSs,219
+openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5-codex.j2,sha256=iMnSKjg2nrFoRUCkNMlGLoMmNWv_9bzK7D82dkJZCbQ,466
+openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5.j2,sha256=iMnSKjg2nrFoRUCkNMlGLoMmNWv_9bzK7D82dkJZCbQ,466
 openhands/sdk/context/__init__.py,sha256=dsOiCbO-eizN7HiGn_ZSSgH-EE841ygwalMzWz9GbVY,557
 openhands/sdk/context/agent_context.py,sha256=CFJRTk0QHcHLB5nL9pKaXiKpVU2UwFNZdYf9p8TArs4,9318
 openhands/sdk/context/view.py,sha256=j8R4r1PSKFUJR_G4qDyI8ToqyHbWtU0YRKDBm8LJm4I,9260
@@ -44,7 +49,7 @@ openhands/sdk/conversation/persistence_const.py,sha256=om3pOQa5sGK8t_NUYb3Tz-7sK
 openhands/sdk/conversation/response_utils.py,sha256=rPlC3cDSmoQte6NZ0kK6h6-9ho5cbF8jEw-DiyEhgIM,1548
 openhands/sdk/conversation/secret_registry.py,sha256=6fY1zRxb55rC4uIMFcR0lDssIyyjaPh9pCWGqDikrek,4446
 openhands/sdk/conversation/serialization_diff.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-openhands/sdk/conversation/state.py,sha256=
+openhands/sdk/conversation/state.py,sha256=VP_KKRO59aj6eCSG4D4mQWwwYzGIkDBYd-XXlWpmyRY,13123
 openhands/sdk/conversation/stuck_detector.py,sha256=PZF0HWC6G0SUud_U3hiv5r4AqfJJMw5-ookEWVOY5sY,10866
 openhands/sdk/conversation/title_utils.py,sha256=j40-dP-Oes-mhU2xUC7fCC8cB0wkMdbbDJU7WLHiVIo,7063
 openhands/sdk/conversation/types.py,sha256=CMCCJz6fSfWgaAgWXeorEDC8VSXyyplyiMlpcueszT8,423
@@ -84,10 +89,10 @@ openhands/sdk/io/base.py,sha256=kAcX0chfCswakiieJlKiHWoJgL3zOtaQauRqMPNYfW8,1355
 openhands/sdk/io/local.py,sha256=H1wjnBS0EkBJxEatWqNpteL1bPBHoetcnhrIeou4uEY,2991
 openhands/sdk/io/memory.py,sha256=XIsdXsSyF-PzoYVmvJuO7Vtz-k3D5jMOFoZ5gHw8tbA,1712
 openhands/sdk/llm/__init__.py,sha256=k8UneyfoDUMe0lSP4GSlYzrL2Fe3MkDUKpSg2OIDi_I,1206
-openhands/sdk/llm/llm.py,sha256=
+openhands/sdk/llm/llm.py,sha256=bvUoK9e0qH6yhT3JmB1TjeH-aYJWunVL4t9EoPMoJnY,44706
 openhands/sdk/llm/llm_registry.py,sha256=DL9yqSbAM7OBkzdIChLuxG2qk_oElW2tC2xem6mq0F8,3530
 openhands/sdk/llm/llm_response.py,sha256=DaBVBkij4Sz-RsYhRb3UUcvJCTzCBcOYQ9IhFwN4ukI,1988
-openhands/sdk/llm/message.py,sha256=
+openhands/sdk/llm/message.py,sha256=kbqWXKQ4-66q8MEAm-3QqGG4ZPrnkEKHQ8WHX2iFBr0,25491
 openhands/sdk/llm/streaming.py,sha256=tFJ7B0AjJ-e8Xv13DTtc2FdrsLRUCG8wxQex8fDlOp4,214
 openhands/sdk/llm/exceptions/__init__.py,sha256=6iMJah2nS6BboU06HqgAM2JT6aykCWY8muoUwaaJpR8,1144
 openhands/sdk/llm/exceptions/classifier.py,sha256=pu5fVNubUrB3eXV1i5W7m4-D4Ik2Z-fGe1ba2t0SSc4,1456
@@ -106,6 +111,7 @@ openhands/sdk/llm/router/impl/random.py,sha256=oBHoFTBMa9OeDyg-rV4siLCkN6rKYL0uD
 openhands/sdk/llm/utils/metrics.py,sha256=4zD0Hkc9Oc4qcDcVZUX13RyggyObshUbz4Ik9W1uIw4,11592
 openhands/sdk/llm/utils/model_features.py,sha256=JMGShnYBplGL6O6f19YC2EqI3dIviNN_58TFbflBnlA,4607
 openhands/sdk/llm/utils/model_info.py,sha256=1mFYA7OcEyUB6k1doao8_w1XT7UMM_DAm57HcTpKkLw,2628
+openhands/sdk/llm/utils/model_prompt_spec.py,sha256=onw9-y7x0aJS8IOjNzeqhdvcFNwK1l_s0XgurWlnj5o,2587
 openhands/sdk/llm/utils/retry_mixin.py,sha256=M-hXp8EwP1FjNN6tgHiv133BtUQgRr9Kz_ZWxeAJLGA,4765
 openhands/sdk/llm/utils/telemetry.py,sha256=_csSRWg80djE27QN9Mg3palVTP-LJ7BbttOaHQYykZU,13970
 openhands/sdk/llm/utils/unverified_models.py,sha256=SmYrX_WxXOJBanTviztqy1xPjOcLY4i3qvwNBEga_Dk,4797
@@ -133,7 +139,7 @@ openhands/sdk/tool/__init__.py,sha256=Fpcy_I5CYC8lDf2KNLj7LJg9YpoDObbacMFOvM9fI5
 openhands/sdk/tool/registry.py,sha256=x5j8DwOjH3YJijhXe_3nRPsSr1A2sDPpNhxCuvFrHK0,5456
 openhands/sdk/tool/schema.py,sha256=PdQbkzPER-Z9B-eKPMr16M8ap27gghszq1xSHH5DGZQ,9114
 openhands/sdk/tool/spec.py,sha256=EbtWasVhwjLKNJywHubbwfYqfgXnZbU4QE6XUOLhsdk,1221
-openhands/sdk/tool/tool.py,sha256=
+openhands/sdk/tool/tool.py,sha256=UrsUiBmywGXtCKlGCxSF650wSx9kX76RpaTs6U3Jb5w,17796
 openhands/sdk/tool/builtins/__init__.py,sha256=30MfENomH9JBPwKs03Xwgny09dDUOt3FSIQ3egRJKhM,729
 openhands/sdk/tool/builtins/finish.py,sha256=pPv_bKDOQE4sUK9lNh1H1lDaxavHozjOwLCIuYrb424,3136
 openhands/sdk/tool/builtins/think.py,sha256=Jp8CBHJZwrtNuVCLrxKlVwb9HeQ1lZB56PCYMxW3wWk,4050
@@ -146,6 +152,7 @@ openhands/sdk/utils/deprecation.py,sha256=7XwepMKTwBLaqS-4rdJupKxNZkepV0Mrru3uTQ
 openhands/sdk/utils/github.py,sha256=l-_LQKw820dA_U8NmDdmaReAPhgMAWqfH1oRMi1q5OA,1638
 openhands/sdk/utils/json.py,sha256=hHAA7i7NCJrQhb5WWSsoT0nvmJUi0koyBdbviFrfdcM,1384
 openhands/sdk/utils/models.py,sha256=tOTGa4O1vHZgHvOp2mNv5V4hM3fBSM6Sswv-0oYVyJQ,20676
+openhands/sdk/utils/paging.py,sha256=pgSQTe8_pArNjW8jwIyB3XKK04cf4knRHZbgrJrCm8Y,1962
 openhands/sdk/utils/pydantic_diff.py,sha256=vOy4M1XKKDkCzp7RXBvnibvPYWL8fNEDtWNUn_-N1yg,2732
 openhands/sdk/utils/pydantic_secrets.py,sha256=B9njRdijnqO4g-GDCWRsWd-TZc5GTjMncDEPMFP_aUE,2186
 openhands/sdk/utils/truncate.py,sha256=LYRA45CBR7daSC03gs80nu7PYdROh943_niOpaoquKU,4390
@@ -159,7 +166,7 @@ openhands/sdk/workspace/remote/__init__.py,sha256=eKkj6NOESMUBGDVC6_L2Wfuc4K6G-m
 openhands/sdk/workspace/remote/async_remote_workspace.py,sha256=ftv1Vdx4mmM3AjygJpemMJGvhaQel7ORxdQVk12z4ZE,5061
 openhands/sdk/workspace/remote/base.py,sha256=72C9MZV7ch5n6oHNvFMo6irW7b6Le8n4gk3yFuc0798,5605
 openhands/sdk/workspace/remote/remote_workspace_mixin.py,sha256=CzHfnLUIra5sgPkP9kcggb1vHGOPpYQzLsHvGO2rRt0,10963
-openhands_sdk-1.
-openhands_sdk-1.
-openhands_sdk-1.
-openhands_sdk-1.
+openhands_sdk-1.6.0.dist-info/METADATA,sha256=-iq0pMLBdi7ZQVeOkbxiX6rDYLcDKlcY4eEUJIb9F70,545
+openhands_sdk-1.6.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+openhands_sdk-1.6.0.dist-info/top_level.txt,sha256=jHgVu9I0Blam8BXFgedoGKfglPF8XvW1TsJFIjcgP4E,10
+openhands_sdk-1.6.0.dist-info/RECORD,,
{openhands_sdk-1.5.1.dist-info → openhands_sdk-1.6.0.dist-info}/WHEEL
File without changes

{openhands_sdk-1.5.1.dist-info → openhands_sdk-1.6.0.dist-info}/top_level.txt
File without changes