openhands 0.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of openhands might be problematic; review the change details below.

Files changed (124)
  1. openhands-1.0.1.dist-info/METADATA +52 -0
  2. openhands-1.0.1.dist-info/RECORD +31 -0
  3. {openhands-0.0.0.dist-info → openhands-1.0.1.dist-info}/WHEEL +1 -2
  4. openhands-1.0.1.dist-info/entry_points.txt +2 -0
  5. openhands_cli/__init__.py +8 -0
  6. openhands_cli/agent_chat.py +186 -0
  7. openhands_cli/argparsers/main_parser.py +56 -0
  8. openhands_cli/argparsers/serve_parser.py +31 -0
  9. openhands_cli/gui_launcher.py +220 -0
  10. openhands_cli/listeners/__init__.py +4 -0
  11. openhands_cli/listeners/loading_listener.py +63 -0
  12. openhands_cli/listeners/pause_listener.py +83 -0
  13. openhands_cli/llm_utils.py +57 -0
  14. openhands_cli/locations.py +13 -0
  15. openhands_cli/pt_style.py +30 -0
  16. openhands_cli/runner.py +178 -0
  17. openhands_cli/setup.py +116 -0
  18. openhands_cli/simple_main.py +59 -0
  19. openhands_cli/tui/__init__.py +5 -0
  20. openhands_cli/tui/settings/mcp_screen.py +217 -0
  21. openhands_cli/tui/settings/settings_screen.py +202 -0
  22. openhands_cli/tui/settings/store.py +93 -0
  23. openhands_cli/tui/status.py +109 -0
  24. openhands_cli/tui/tui.py +100 -0
  25. openhands_cli/tui/utils.py +14 -0
  26. openhands_cli/user_actions/__init__.py +17 -0
  27. openhands_cli/user_actions/agent_action.py +95 -0
  28. openhands_cli/user_actions/exit_session.py +18 -0
  29. openhands_cli/user_actions/settings_action.py +171 -0
  30. openhands_cli/user_actions/types.py +18 -0
  31. openhands_cli/user_actions/utils.py +199 -0
  32. openhands/__init__.py +0 -1
  33. openhands/sdk/__init__.py +0 -45
  34. openhands/sdk/agent/__init__.py +0 -8
  35. openhands/sdk/agent/agent/__init__.py +0 -6
  36. openhands/sdk/agent/agent/agent.py +0 -349
  37. openhands/sdk/agent/base.py +0 -103
  38. openhands/sdk/context/__init__.py +0 -28
  39. openhands/sdk/context/agent_context.py +0 -153
  40. openhands/sdk/context/condenser/__init__.py +0 -5
  41. openhands/sdk/context/condenser/condenser.py +0 -73
  42. openhands/sdk/context/condenser/no_op_condenser.py +0 -13
  43. openhands/sdk/context/manager.py +0 -5
  44. openhands/sdk/context/microagents/__init__.py +0 -26
  45. openhands/sdk/context/microagents/exceptions.py +0 -11
  46. openhands/sdk/context/microagents/microagent.py +0 -345
  47. openhands/sdk/context/microagents/types.py +0 -70
  48. openhands/sdk/context/utils/__init__.py +0 -8
  49. openhands/sdk/context/utils/prompt.py +0 -52
  50. openhands/sdk/context/view.py +0 -116
  51. openhands/sdk/conversation/__init__.py +0 -12
  52. openhands/sdk/conversation/conversation.py +0 -207
  53. openhands/sdk/conversation/state.py +0 -50
  54. openhands/sdk/conversation/types.py +0 -6
  55. openhands/sdk/conversation/visualizer.py +0 -300
  56. openhands/sdk/event/__init__.py +0 -27
  57. openhands/sdk/event/base.py +0 -148
  58. openhands/sdk/event/condenser.py +0 -49
  59. openhands/sdk/event/llm_convertible.py +0 -265
  60. openhands/sdk/event/types.py +0 -5
  61. openhands/sdk/event/user_action.py +0 -12
  62. openhands/sdk/event/utils.py +0 -30
  63. openhands/sdk/llm/__init__.py +0 -19
  64. openhands/sdk/llm/exceptions.py +0 -108
  65. openhands/sdk/llm/llm.py +0 -867
  66. openhands/sdk/llm/llm_registry.py +0 -116
  67. openhands/sdk/llm/message.py +0 -216
  68. openhands/sdk/llm/metadata.py +0 -34
  69. openhands/sdk/llm/utils/fn_call_converter.py +0 -1049
  70. openhands/sdk/llm/utils/metrics.py +0 -311
  71. openhands/sdk/llm/utils/model_features.py +0 -153
  72. openhands/sdk/llm/utils/retry_mixin.py +0 -122
  73. openhands/sdk/llm/utils/telemetry.py +0 -252
  74. openhands/sdk/logger.py +0 -167
  75. openhands/sdk/mcp/__init__.py +0 -20
  76. openhands/sdk/mcp/client.py +0 -113
  77. openhands/sdk/mcp/definition.py +0 -69
  78. openhands/sdk/mcp/tool.py +0 -104
  79. openhands/sdk/mcp/utils.py +0 -59
  80. openhands/sdk/tests/llm/test_llm.py +0 -447
  81. openhands/sdk/tests/llm/test_llm_fncall_converter.py +0 -691
  82. openhands/sdk/tests/llm/test_model_features.py +0 -221
  83. openhands/sdk/tool/__init__.py +0 -30
  84. openhands/sdk/tool/builtins/__init__.py +0 -34
  85. openhands/sdk/tool/builtins/finish.py +0 -57
  86. openhands/sdk/tool/builtins/think.py +0 -60
  87. openhands/sdk/tool/schema.py +0 -236
  88. openhands/sdk/tool/security_prompt.py +0 -5
  89. openhands/sdk/tool/tool.py +0 -142
  90. openhands/sdk/utils/__init__.py +0 -14
  91. openhands/sdk/utils/discriminated_union.py +0 -210
  92. openhands/sdk/utils/json.py +0 -48
  93. openhands/sdk/utils/truncate.py +0 -44
  94. openhands/tools/__init__.py +0 -44
  95. openhands/tools/execute_bash/__init__.py +0 -30
  96. openhands/tools/execute_bash/constants.py +0 -31
  97. openhands/tools/execute_bash/definition.py +0 -166
  98. openhands/tools/execute_bash/impl.py +0 -38
  99. openhands/tools/execute_bash/metadata.py +0 -101
  100. openhands/tools/execute_bash/terminal/__init__.py +0 -22
  101. openhands/tools/execute_bash/terminal/factory.py +0 -113
  102. openhands/tools/execute_bash/terminal/interface.py +0 -189
  103. openhands/tools/execute_bash/terminal/subprocess_terminal.py +0 -412
  104. openhands/tools/execute_bash/terminal/terminal_session.py +0 -492
  105. openhands/tools/execute_bash/terminal/tmux_terminal.py +0 -160
  106. openhands/tools/execute_bash/utils/command.py +0 -150
  107. openhands/tools/str_replace_editor/__init__.py +0 -17
  108. openhands/tools/str_replace_editor/definition.py +0 -158
  109. openhands/tools/str_replace_editor/editor.py +0 -683
  110. openhands/tools/str_replace_editor/exceptions.py +0 -41
  111. openhands/tools/str_replace_editor/impl.py +0 -66
  112. openhands/tools/str_replace_editor/utils/__init__.py +0 -0
  113. openhands/tools/str_replace_editor/utils/config.py +0 -2
  114. openhands/tools/str_replace_editor/utils/constants.py +0 -9
  115. openhands/tools/str_replace_editor/utils/encoding.py +0 -135
  116. openhands/tools/str_replace_editor/utils/file_cache.py +0 -154
  117. openhands/tools/str_replace_editor/utils/history.py +0 -122
  118. openhands/tools/str_replace_editor/utils/shell.py +0 -72
  119. openhands/tools/task_tracker/__init__.py +0 -16
  120. openhands/tools/task_tracker/definition.py +0 -336
  121. openhands/tools/utils/__init__.py +0 -1
  122. openhands-0.0.0.dist-info/METADATA +0 -3
  123. openhands-0.0.0.dist-info/RECORD +0 -94
  124. openhands-0.0.0.dist-info/top_level.txt +0 -1
@@ -1,116 +0,0 @@
1
- from typing import Callable
2
- from uuid import uuid4
3
-
4
- from pydantic import BaseModel, ConfigDict
5
-
6
- from openhands.sdk.llm.llm import LLM
7
- from openhands.sdk.logger import get_logger
8
-
9
-
10
- logger = get_logger(__name__)
11
-
12
-
13
class RegistryEvent(BaseModel):
    """Event emitted when an LLM is added to a registry."""

    # The LLM instance that was registered.
    llm: LLM
    # The service ID it was registered under.
    service_id: str

    # LLM is not a plain pydantic type, so arbitrary types must be allowed.
    model_config = ConfigDict(arbitrary_types_allowed=True)
20
-
21
-
22
class LLMRegistry:
    """A minimal LLM registry for managing LLM instances by service ID.

    This registry provides a simple way to manage multiple LLM instances,
    avoiding the need to recreate LLMs with the same configuration.
    """

    def __init__(
        self,
        retry_listener: Callable[[int, int], None] | None = None,
    ):
        """Initialize the LLM registry.

        Args:
            retry_listener: Optional callback for retry events.
        """
        # Unique ID so log lines from different registries can be told apart.
        self.registry_id = str(uuid4())
        self.retry_listener = retry_listener
        self.service_to_llm: dict[str, LLM] = {}
        # Single optional subscriber notified on add/update.
        self.subscriber: Callable[[RegistryEvent], None] | None = None

    def subscribe(self, callback: Callable[[RegistryEvent], None]) -> None:
        """Subscribe to registry events.

        Args:
            callback: Function to call when LLMs are created or updated.
        """
        self.subscriber = callback

    def notify(self, event: RegistryEvent) -> None:
        """Notify subscribers of registry events.

        Args:
            event: The registry event to notify about.
        """
        if not self.subscriber:
            return
        try:
            self.subscriber(event)
        except Exception as e:
            # A broken subscriber must not take down the registry itself.
            logger.warning(f"Failed to emit event: {e}")

    def add(self, service_id: str, llm: LLM) -> None:
        """Add an LLM instance to the registry.

        Args:
            service_id: Unique identifier for the LLM service.
            llm: The LLM instance to register.

        Raises:
            ValueError: If service_id already exists in the registry.
        """
        if service_id in self.service_to_llm:
            raise ValueError(
                f"Service ID '{service_id}' already exists in registry. "
                "Use a different service_id or call get() to retrieve the existing LLM."
            )

        # Stamp the service_id onto the LLM instance before storing it.
        llm.service_id = service_id
        self.service_to_llm[service_id] = llm
        self.notify(RegistryEvent(llm=llm, service_id=service_id))
        logger.info(
            f"[LLM registry {self.registry_id}]: Added LLM for service {service_id}"
        )

    def get(self, service_id: str) -> LLM:
        """Get an LLM instance from the registry.

        Args:
            service_id: Unique identifier for the LLM service.

        Returns:
            The LLM instance.

        Raises:
            KeyError: If service_id is not found in the registry.
        """
        try:
            registered = self.service_to_llm[service_id]
        except KeyError:
            raise KeyError(
                f"Service ID '{service_id}' not found in registry. "
                "Use add() to register an LLM first."
            ) from None

        logger.info(
            f"[LLM registry {self.registry_id}]: Retrieved LLM for service {service_id}"
        )
        return registered

    def list_services(self) -> list[str]:
        """List all registered service IDs.

        Returns:
            List of service IDs currently in the registry.
        """
        return list(self.service_to_llm)
@@ -1,216 +0,0 @@
1
- from typing import Any, Literal, cast
2
-
3
- import mcp.types
4
- from litellm import ChatCompletionMessageToolCall
5
- from litellm.types.utils import Message as LiteLLMMessage
6
- from pydantic import BaseModel, ConfigDict, Field
7
-
8
- from openhands.sdk.logger import get_logger
9
- from openhands.sdk.utils import DEFAULT_TEXT_CONTENT_LIMIT, maybe_truncate
10
-
11
-
12
- logger = get_logger(__name__)
13
-
14
-
15
class BaseContent(BaseModel):
    """Common base for message content items sent to an LLM."""

    # When True, mark this item for provider-side prompt caching.
    cache_prompt: bool = False

    def to_llm_dict(
        self,
    ) -> dict[str, str | dict[str, str]] | list[dict[str, str | dict[str, str]]]:
        """Convert to LLM API format. Subclasses should implement this method."""
        raise NotImplementedError("Subclasses should implement this method.")
23
-
24
-
25
class TextContent(mcp.types.TextContent, BaseContent):
    """A text content item of an LLM message."""

    type: Literal["text"] = "text"
    text: str
    # populate_by_name is needed because mcp.types.TextContent aliases
    # meta -> _meta, yet .model_dump() emits "meta".
    model_config = ConfigDict(extra="forbid", populate_by_name=True)

    def to_llm_dict(self) -> dict[str, str | dict[str, str]]:
        """Convert to LLM API format."""
        body = self.text
        # Guard against oversized text blowing past the provider limit.
        if len(body) > DEFAULT_TEXT_CONTENT_LIMIT:
            logger.warning(
                f"TextContent text length ({len(body)}) exceeds limit "
                f"({DEFAULT_TEXT_CONTENT_LIMIT}), truncating"
            )
            body = maybe_truncate(body, DEFAULT_TEXT_CONTENT_LIMIT)

        result: dict[str, str | dict[str, str]] = {"type": self.type, "text": body}
        if self.cache_prompt:
            result["cache_control"] = {"type": "ephemeral"}
        return result
49
-
50
-
51
class ImageContent(mcp.types.ImageContent, BaseContent):
    """An image content item of an LLM message, carried as URLs."""

    type: Literal["image"] = "image"
    image_urls: list[str]
    # populate_by_name is needed because mcp.types.ImageContent aliases
    # meta -> _meta, yet .model_dump() emits "meta".
    model_config = ConfigDict(extra="forbid", populate_by_name=True)

    def to_llm_dict(self) -> list[dict[str, str | dict[str, str]]]:
        """Convert to LLM API format."""
        entries: list[dict[str, str | dict[str, str]]] = [
            {"type": "image_url", "image_url": {"url": url}}
            for url in self.image_urls
        ]
        # Cache marker goes on the last image only, per provider convention.
        if self.cache_prompt and entries:
            entries[-1]["cache_control"] = {"type": "ephemeral"}
        return entries
66
-
67
-
68
class Message(BaseModel):
    """A chat message in the LLM's API shape (role, content, tool plumbing)."""

    # NOTE: this is not the same as EventSource
    # These are the roles in the LLM's APIs
    role: Literal["user", "system", "assistant", "tool"]
    content: list[TextContent | ImageContent] = Field(default_factory=list)
    cache_enabled: bool = False
    vision_enabled: bool = False
    # function calling
    function_calling_enabled: bool = False
    # - tool calls (from LLM)
    tool_calls: list[ChatCompletionMessageToolCall] | None = None
    # - tool execution result (to LLM)
    tool_call_id: str | None = None
    name: str | None = None  # name of the tool
    # force string serializer
    force_string_serializer: bool = False
    # reasoning content (from reasoning models like o1, Claude thinking, DeepSeek R1)
    reasoning_content: str | None = Field(
        default=None,
        description="Intermediate reasoning/thinking content from reasoning models",
    )

    @property
    def contains_image(self) -> bool:
        """True if any content item is an ImageContent."""
        return any(isinstance(item, ImageContent) for item in self.content)

    def to_llm_dict(self) -> dict[str, Any]:
        """Serialize message for LLM API consumption.

        This method chooses the appropriate serialization format based on the message
        configuration and provider capabilities:
        - String format: for providers that don't support list of content items
        - List format: for providers with vision/prompt caching/tool calls support
        """
        wants_list = not self.force_string_serializer and (
            self.cache_enabled or self.vision_enabled or self.function_calling_enabled
        )
        # Some providers, like HF and Groq/llama, don't support a list of
        # content items here — only a single string.
        return self._list_serializer() if wants_list else self._string_serializer()

    def _string_serializer(self) -> dict[str, Any]:
        """Flatten text content into a single newline-joined string."""
        flattened = "\n".join(
            part.text for part in self.content if isinstance(part, TextContent)
        )
        payload: dict[str, Any] = {"content": flattened, "role": self.role}

        # Add tool call keys if we have a tool call or response.
        return self._add_tool_call_keys(payload)

    def _list_serializer(self) -> dict[str, Any]:
        """Serialize content as a list of typed items (vision/caching/tools)."""
        serialized: list[dict[str, Any]] = []
        hoist_cache_control = False

        for item in self.content:
            # Serialize with the subclass-specific return type.
            raw = item.to_llm_dict()
            # cache_prompt on tool content must be removed and moved up to the
            # message level.
            # See discussion here for details: https://github.com/BerriAI/litellm/issues/6422#issuecomment-2438765472
            if isinstance(item, TextContent):
                entry = cast(dict[str, Any], raw)
                if self.role == "tool" and item.cache_prompt:
                    hoist_cache_control = True
                    entry.pop("cache_control", None)
                serialized.append(entry)

            elif isinstance(item, ImageContent) and self.vision_enabled:
                # ImageContent serializes to a list of dicts, one per URL.
                entries = cast(list[dict[str, Any]], raw)
                if self.role == "tool" and item.cache_prompt:
                    hoist_cache_control = True
                    for entry in entries:
                        entry.pop("cache_control", None)
                serialized.extend(entries)

        payload: dict[str, Any] = {"content": serialized, "role": self.role}
        if hoist_cache_control:
            payload["cache_control"] = {"type": "ephemeral"}

        return self._add_tool_call_keys(payload)

    def _add_tool_call_keys(self, message_dict: dict[str, Any]) -> dict[str, Any]:
        """Add tool call keys if we have a tool call or response.

        NOTE: this is necessary for both native and non-native tool calling
        """
        # An assistant message calling a tool.
        if self.tool_calls is not None:
            message_dict["tool_calls"] = [
                {
                    "id": call.id,
                    "type": "function",
                    "function": {
                        "name": call.function.name,
                        "arguments": call.function.arguments,
                    },
                }
                for call in self.tool_calls
            ]

        # An observation message carrying a tool response.
        if self.tool_call_id is not None:
            assert self.name is not None, (
                "name is required when tool_call_id is not None"
            )
            message_dict["tool_call_id"] = self.tool_call_id
            message_dict["name"] = self.name

        return message_dict

    @classmethod
    def from_litellm_message(cls, message: LiteLLMMessage) -> "Message":
        """Convert a LiteLLMMessage to our Message class.

        Provider-agnostic mapping for reasoning:
        - Prefer `message.reasoning_content` if present (LiteLLM normalized field)
        """
        assert message.role != "function", "Function role is not supported"

        converted_content = (
            [TextContent(text=message.content)]
            if isinstance(message.content, str)
            else []
        )
        return Message(
            role=message.role,
            content=converted_content,
            tool_calls=message.tool_calls,
            reasoning_content=getattr(message, "reasoning_content", None),
        )
203
-
204
-
205
def content_to_str(contents: list[TextContent | ImageContent]) -> list[str]:
    """Convert a list of TextContent and ImageContent to a list of strings.

    This is primarily used for display purposes.
    """
    rendered: list[str] = []
    for item in contents:
        if isinstance(item, TextContent):
            rendered.append(item.text)
        elif isinstance(item, ImageContent):
            # Images are summarized by URL count rather than inlined.
            rendered.append(f"[Image: {len(item.image_urls)} URLs]")
    return rendered
@@ -1,34 +0,0 @@
1
- import os
2
-
3
-
4
def get_llm_metadata(
    model_name: str,
    agent_name: str,
    session_id: str | None = None,
    user_id: str | None = None,
) -> dict:
    """Build tracing metadata for an LLM call.

    Args:
        model_name: Name of the model being invoked.
        agent_name: Name of the agent making the call.
        session_id: Optional session identifier attached as "session_id".
        user_id: Optional user identifier attached as "trace_user_id".

    Returns:
        A dict with "trace_version", "tags", and the optional session/user keys.
    """
    import openhands.sdk

    # Best effort: openhands.tools may be absent entirely (ImportError), or
    # installed without a __version__ attribute; fall back to "n/a" in both
    # cases instead of raising.
    openhands_tools_version: str = "n/a"
    try:
        import openhands.tools

        openhands_tools_version = getattr(openhands.tools, "__version__", "n/a")
    except ImportError:
        pass

    sdk_version = openhands.sdk.__version__
    metadata: dict = {
        "trace_version": sdk_version,
        "tags": [
            f"model:{model_name}",
            f"agent:{agent_name}",
            f"web_host:{os.environ.get('WEB_HOST', 'unspecified')}",
            f"openhands_version:{sdk_version}",
            f"openhands_tools_version:{openhands_tools_version}",
        ],
    }
    if session_id is not None:
        metadata["session_id"] = session_id
    if user_id is not None:
        metadata["trace_user_id"] = user_id
    return metadata