openhands 0.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of openhands has been flagged as possibly problematic.
- openhands-1.0.1.dist-info/METADATA +52 -0
- openhands-1.0.1.dist-info/RECORD +31 -0
- {openhands-0.0.0.dist-info → openhands-1.0.1.dist-info}/WHEEL +1 -2
- openhands-1.0.1.dist-info/entry_points.txt +2 -0
- openhands_cli/__init__.py +8 -0
- openhands_cli/agent_chat.py +186 -0
- openhands_cli/argparsers/main_parser.py +56 -0
- openhands_cli/argparsers/serve_parser.py +31 -0
- openhands_cli/gui_launcher.py +220 -0
- openhands_cli/listeners/__init__.py +4 -0
- openhands_cli/listeners/loading_listener.py +63 -0
- openhands_cli/listeners/pause_listener.py +83 -0
- openhands_cli/llm_utils.py +57 -0
- openhands_cli/locations.py +13 -0
- openhands_cli/pt_style.py +30 -0
- openhands_cli/runner.py +178 -0
- openhands_cli/setup.py +116 -0
- openhands_cli/simple_main.py +59 -0
- openhands_cli/tui/__init__.py +5 -0
- openhands_cli/tui/settings/mcp_screen.py +217 -0
- openhands_cli/tui/settings/settings_screen.py +202 -0
- openhands_cli/tui/settings/store.py +93 -0
- openhands_cli/tui/status.py +109 -0
- openhands_cli/tui/tui.py +100 -0
- openhands_cli/tui/utils.py +14 -0
- openhands_cli/user_actions/__init__.py +17 -0
- openhands_cli/user_actions/agent_action.py +95 -0
- openhands_cli/user_actions/exit_session.py +18 -0
- openhands_cli/user_actions/settings_action.py +171 -0
- openhands_cli/user_actions/types.py +18 -0
- openhands_cli/user_actions/utils.py +199 -0
- openhands/__init__.py +0 -1
- openhands/sdk/__init__.py +0 -45
- openhands/sdk/agent/__init__.py +0 -8
- openhands/sdk/agent/agent/__init__.py +0 -6
- openhands/sdk/agent/agent/agent.py +0 -349
- openhands/sdk/agent/base.py +0 -103
- openhands/sdk/context/__init__.py +0 -28
- openhands/sdk/context/agent_context.py +0 -153
- openhands/sdk/context/condenser/__init__.py +0 -5
- openhands/sdk/context/condenser/condenser.py +0 -73
- openhands/sdk/context/condenser/no_op_condenser.py +0 -13
- openhands/sdk/context/manager.py +0 -5
- openhands/sdk/context/microagents/__init__.py +0 -26
- openhands/sdk/context/microagents/exceptions.py +0 -11
- openhands/sdk/context/microagents/microagent.py +0 -345
- openhands/sdk/context/microagents/types.py +0 -70
- openhands/sdk/context/utils/__init__.py +0 -8
- openhands/sdk/context/utils/prompt.py +0 -52
- openhands/sdk/context/view.py +0 -116
- openhands/sdk/conversation/__init__.py +0 -12
- openhands/sdk/conversation/conversation.py +0 -207
- openhands/sdk/conversation/state.py +0 -50
- openhands/sdk/conversation/types.py +0 -6
- openhands/sdk/conversation/visualizer.py +0 -300
- openhands/sdk/event/__init__.py +0 -27
- openhands/sdk/event/base.py +0 -148
- openhands/sdk/event/condenser.py +0 -49
- openhands/sdk/event/llm_convertible.py +0 -265
- openhands/sdk/event/types.py +0 -5
- openhands/sdk/event/user_action.py +0 -12
- openhands/sdk/event/utils.py +0 -30
- openhands/sdk/llm/__init__.py +0 -19
- openhands/sdk/llm/exceptions.py +0 -108
- openhands/sdk/llm/llm.py +0 -867
- openhands/sdk/llm/llm_registry.py +0 -116
- openhands/sdk/llm/message.py +0 -216
- openhands/sdk/llm/metadata.py +0 -34
- openhands/sdk/llm/utils/fn_call_converter.py +0 -1049
- openhands/sdk/llm/utils/metrics.py +0 -311
- openhands/sdk/llm/utils/model_features.py +0 -153
- openhands/sdk/llm/utils/retry_mixin.py +0 -122
- openhands/sdk/llm/utils/telemetry.py +0 -252
- openhands/sdk/logger.py +0 -167
- openhands/sdk/mcp/__init__.py +0 -20
- openhands/sdk/mcp/client.py +0 -113
- openhands/sdk/mcp/definition.py +0 -69
- openhands/sdk/mcp/tool.py +0 -104
- openhands/sdk/mcp/utils.py +0 -59
- openhands/sdk/tests/llm/test_llm.py +0 -447
- openhands/sdk/tests/llm/test_llm_fncall_converter.py +0 -691
- openhands/sdk/tests/llm/test_model_features.py +0 -221
- openhands/sdk/tool/__init__.py +0 -30
- openhands/sdk/tool/builtins/__init__.py +0 -34
- openhands/sdk/tool/builtins/finish.py +0 -57
- openhands/sdk/tool/builtins/think.py +0 -60
- openhands/sdk/tool/schema.py +0 -236
- openhands/sdk/tool/security_prompt.py +0 -5
- openhands/sdk/tool/tool.py +0 -142
- openhands/sdk/utils/__init__.py +0 -14
- openhands/sdk/utils/discriminated_union.py +0 -210
- openhands/sdk/utils/json.py +0 -48
- openhands/sdk/utils/truncate.py +0 -44
- openhands/tools/__init__.py +0 -44
- openhands/tools/execute_bash/__init__.py +0 -30
- openhands/tools/execute_bash/constants.py +0 -31
- openhands/tools/execute_bash/definition.py +0 -166
- openhands/tools/execute_bash/impl.py +0 -38
- openhands/tools/execute_bash/metadata.py +0 -101
- openhands/tools/execute_bash/terminal/__init__.py +0 -22
- openhands/tools/execute_bash/terminal/factory.py +0 -113
- openhands/tools/execute_bash/terminal/interface.py +0 -189
- openhands/tools/execute_bash/terminal/subprocess_terminal.py +0 -412
- openhands/tools/execute_bash/terminal/terminal_session.py +0 -492
- openhands/tools/execute_bash/terminal/tmux_terminal.py +0 -160
- openhands/tools/execute_bash/utils/command.py +0 -150
- openhands/tools/str_replace_editor/__init__.py +0 -17
- openhands/tools/str_replace_editor/definition.py +0 -158
- openhands/tools/str_replace_editor/editor.py +0 -683
- openhands/tools/str_replace_editor/exceptions.py +0 -41
- openhands/tools/str_replace_editor/impl.py +0 -66
- openhands/tools/str_replace_editor/utils/__init__.py +0 -0
- openhands/tools/str_replace_editor/utils/config.py +0 -2
- openhands/tools/str_replace_editor/utils/constants.py +0 -9
- openhands/tools/str_replace_editor/utils/encoding.py +0 -135
- openhands/tools/str_replace_editor/utils/file_cache.py +0 -154
- openhands/tools/str_replace_editor/utils/history.py +0 -122
- openhands/tools/str_replace_editor/utils/shell.py +0 -72
- openhands/tools/task_tracker/__init__.py +0 -16
- openhands/tools/task_tracker/definition.py +0 -336
- openhands/tools/utils/__init__.py +0 -1
- openhands-0.0.0.dist-info/METADATA +0 -3
- openhands-0.0.0.dist-info/RECORD +0 -94
- openhands-0.0.0.dist-info/top_level.txt +0 -1
openhands/sdk/llm/utils/telemetry.py
DELETED

@@ -1,252 +0,0 @@

import json
import os
import time
import warnings
from typing import Any, Optional

from litellm.cost_calculator import completion_cost as litellm_completion_cost
from litellm.types.utils import CostPerToken, ModelResponse, Usage
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr

from openhands.sdk.llm.utils.metrics import Metrics
from openhands.sdk.logger import get_logger


logger = get_logger(__name__)


class Telemetry(BaseModel):
    """
    Handles latency, token/cost accounting, and optional logging.
    All runtime state (like start times) lives in private attrs.
    """

    # --- Config fields ---
    model_name: str = Field(default="unknown", description="Name of the LLM model")
    log_enabled: bool = Field(default=False, description="Whether to log completions")
    log_dir: Optional[str] = Field(
        default=None, description="Directory to write logs if enabled"
    )
    input_cost_per_token: float | None = Field(
        default=None, description="Custom Input cost per token (USD)"
    )
    output_cost_per_token: float | None = Field(
        default=None, description="Custom Output cost per token (USD)"
    )

    metrics: Metrics = Field(..., description="Metrics collector instance")

    # --- Runtime fields (not serialized) ---
    _req_start: float = PrivateAttr(default=0.0)
    _req_ctx: dict[str, Any] = PrivateAttr(default_factory=dict)
    _last_latency: float = PrivateAttr(default=0.0)

    model_config = ConfigDict(extra="forbid", arbitrary_types_allowed=True)

    # ---------- Lifecycle ----------
    def on_request(self, log_ctx: dict | None) -> None:
        self._req_start = time.time()
        self._req_ctx = log_ctx or {}

    def on_response(
        self, resp: ModelResponse, raw_resp: ModelResponse | None = None
    ) -> Metrics:
        """
        Side-effects:
          - records latency, tokens, cost into Metrics
          - optionally writes a JSON log file
        """
        # 1) latency
        self._last_latency = time.time() - (self._req_start or time.time())
        response_id = resp.id
        self.metrics.add_response_latency(self._last_latency, response_id)

        # 2) cost
        cost = self._compute_cost(resp)
        if cost:
            self.metrics.add_cost(cost)

        # 3) tokens - handle both dict and ModelResponse objects
        if isinstance(resp, dict):
            usage = resp.get("usage")
        else:
            usage = getattr(resp, "usage", None)

        if usage and self._has_meaningful_usage(usage):
            self._record_usage(
                usage, response_id, self._req_ctx.get("context_window", 0)
            )

        # 4) optional logging
        if self.log_enabled:
            self._log_completion(resp, cost, raw_resp=raw_resp)

        return self.metrics.deep_copy()

    def on_error(self, err: Exception) -> None:
        # Stub for error tracking / counters
        return

    # ---------- Helpers ----------
    def _has_meaningful_usage(self, usage) -> bool:
        """Check if usage has meaningful (non-zero) token counts."""
        if not usage:
            return False

        # Handle MagicMock objects safely
        try:
            if isinstance(usage, dict):
                prompt_tokens = usage.get("prompt_tokens", 0) or 0
                completion_tokens = usage.get("completion_tokens", 0) or 0
            else:
                prompt_tokens = getattr(usage, "prompt_tokens", 0) or 0
                completion_tokens = getattr(usage, "completion_tokens", 0) or 0

            # Convert to int safely (handles MagicMock objects)
            prompt_tokens = int(prompt_tokens) if str(prompt_tokens).isdigit() else 0
            completion_tokens = (
                int(completion_tokens) if str(completion_tokens).isdigit() else 0
            )

            return prompt_tokens > 0 or completion_tokens > 0
        except (ValueError, TypeError, AttributeError):
            return False

    def _record_usage(
        self, usage: Usage, response_id: str, context_window: int
    ) -> None:
        # Handle both dict and Usage objects
        if isinstance(usage, dict):
            usage = Usage.model_validate(usage)

        prompt_tokens = usage.prompt_tokens or 0
        completion_tokens = usage.completion_tokens or 0
        cache_write = usage._cache_creation_input_tokens or 0

        cache_read = 0
        prompt_token_details = usage.prompt_tokens_details or None
        if prompt_token_details and prompt_token_details.cached_tokens:
            cache_read = prompt_token_details.cached_tokens

        reasoning_tokens = 0
        completion_tokens_details = usage.completion_tokens_details or None
        if completion_tokens_details and completion_tokens_details.reasoning_tokens:
            reasoning_tokens = completion_tokens_details.reasoning_tokens

        self.metrics.add_token_usage(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            cache_read_tokens=cache_read,
            cache_write_tokens=cache_write,
            reasoning_tokens=reasoning_tokens,
            context_window=context_window,
            response_id=response_id,
        )

    def _compute_cost(self, resp: ModelResponse) -> Optional[float]:
        """Try provider header → litellm direct. Return None on failure."""
        extra_kwargs = {}
        if (
            self.input_cost_per_token is not None
            and self.output_cost_per_token is not None
        ):
            cost_per_token = CostPerToken(
                input_cost_per_token=self.input_cost_per_token,
                output_cost_per_token=self.output_cost_per_token,
            )
            logger.debug(f"Using custom cost per token: {cost_per_token}")
            extra_kwargs["custom_cost_per_token"] = cost_per_token

        try:
            hidden = getattr(resp, "_hidden_params", {}) or {}
            cost = hidden.get("additional_headers", {}).get(
                "llm_provider-x-litellm-response-cost"
            )
            if cost is not None:
                return float(cost)
        except Exception as e:
            logger.debug(f"Failed to get cost from LiteLLM headers: {e}")

        # move on to litellm cost calculator
        # Handle model name properly - if it doesn't contain "/", use as-is
        model_parts = self.model_name.split("/")
        if len(model_parts) > 1:
            extra_kwargs["model"] = "/".join(model_parts[1:])
        else:
            extra_kwargs["model"] = self.model_name
        try:
            return float(
                litellm_completion_cost(completion_response=resp, **extra_kwargs)
            )
        except Exception as e:
            warnings.warn(f"Cost calculation failed: {e}")
            return None

    def _log_completion(
        self,
        resp: ModelResponse,
        cost: Optional[float],
        raw_resp: ModelResponse | None = None,
    ) -> None:
        if not self.log_dir:
            return
        try:
            # Only log if directory exists and is writable.
            # Do not create directories implicitly.
            if not os.path.isdir(self.log_dir):
                raise FileNotFoundError(f"log_dir does not exist: {self.log_dir}")
            if not os.access(self.log_dir, os.W_OK):
                raise PermissionError(f"log_dir is not writable: {self.log_dir}")

            fname = os.path.join(
                self.log_dir,
                f"{self.model_name.replace('/', '__')}-{time.time():.3f}.json",
            )
            data = self._req_ctx.copy()
            data["response"] = resp.model_dump()
            data["cost"] = float(cost or 0.0)
            data["timestamp"] = time.time()
            data["latency_sec"] = self._last_latency

            # Usage summary (prompt, completion, reasoning tokens) for quick inspection
            try:
                usage = getattr(resp, "usage", None)
                if usage:
                    if isinstance(usage, dict):
                        usage = Usage.model_validate(usage)
                    prompt_tokens = int(usage.prompt_tokens or 0)
                    completion_tokens = int(usage.completion_tokens or 0)
                    reasoning_tokens = 0
                    details = usage.completion_tokens_details or None
                    if details and details.reasoning_tokens:
                        reasoning_tokens = int(details.reasoning_tokens)
                    data["usage_summary"] = {
                        "prompt_tokens": prompt_tokens,
                        "completion_tokens": completion_tokens,
                        "reasoning_tokens": reasoning_tokens,
                    }
                    if usage.prompt_tokens_details:
                        data["usage_summary"]["cache_read_tokens"] = int(
                            usage.prompt_tokens_details.cached_tokens or 0
                        )
            except Exception:
                # Best-effort only; don't fail logging
                pass

            # Raw response *before* nonfncall -> call conversion
            if raw_resp:
                data["raw_response"] = raw_resp
            # pop duplicated tools
            if "tool" in data and "tool" in data.get("kwargs", {}):
                data["kwargs"].pop("tool")
            with open(fname, "w") as f:
                f.write(json.dumps(data, default=_safe_json))
        except Exception as e:
            warnings.warn(f"Telemetry logging failed: {e}")


def _safe_json(obj: Any) -> Any:
    try:
        return obj.__dict__
    except Exception:
        return str(obj)
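For orientation, a minimal usage sketch of the removed Telemetry class as it shipped in the 0.0.0 wheel; this is not part of the published diff. The zero-argument Metrics() construction, the model name, and the litellm completion call are illustrative assumptions.

from litellm import completion
from openhands.sdk.llm.utils.metrics import Metrics
from openhands.sdk.llm.utils.telemetry import Telemetry

metrics = Metrics()  # assumption: Metrics can be built with defaults
telemetry = Telemetry(model_name="gpt-4o-mini", metrics=metrics)

telemetry.on_request({"context_window": 128_000})  # start latency timer, stash log context
resp = completion(model="gpt-4o-mini", messages=[{"role": "user", "content": "hi"}])
snapshot = telemetry.on_response(resp)  # records latency, tokens, cost; returns a Metrics copy

With log_enabled=True and an existing, writable log_dir, each response would also be written out as a JSON file named after the model.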
openhands/sdk/logger.py
DELETED

@@ -1,167 +0,0 @@

# simple_logger.py
"""
Minimal logger setup that encourages per-module loggers,
with Rich for humans and JSON for machines.

Usage:
    from openhands.sdk.logger import get_logger
    logger = get_logger(__name__)
    logger.info("Hello from this module!")
"""

import logging
import os
from logging.handlers import TimedRotatingFileHandler

import litellm
from pythonjsonlogger.json import JsonFormatter
from rich.console import Console
from rich.logging import RichHandler


# ========= ENV (loaded at import) =========
LEVEL_MAP = (
    logging.getLevelNamesMapping()
    if hasattr(logging, "getLevelNamesMapping")
    else logging._nameToLevel
)

DEBUG = os.environ.get("DEBUG", "false").lower() in {"1", "true", "yes"}
ENV_LOG_LEVEL_STR = os.getenv("LOG_LEVEL", "INFO").upper()
ENV_LOG_LEVEL = LEVEL_MAP.get(ENV_LOG_LEVEL_STR, logging.INFO)
if DEBUG:
    ENV_LOG_LEVEL = logging.DEBUG

ENV_LOG_TO_FILE = os.getenv("LOG_TO_FILE", "false").lower() in {"1", "true", "yes"}
ENV_LOG_DIR = os.getenv("LOG_DIR", "logs")
ENV_ROTATE_WHEN = os.getenv("LOG_ROTATE_WHEN", "midnight")
ENV_BACKUP_COUNT = int(os.getenv("LOG_BACKUP_COUNT", "7"))

# Rich vs JSON
ENV_JSON = os.getenv("LOG_JSON", "false").lower() in {"1", "true", "yes"}
IN_CI = os.getenv("CI", "false").lower() in {"1", "true", "yes"}
ENV_RICH_TRACEBACKS = os.getenv("LOG_RICH_TRACEBACKS", "true").lower() in {
    "1",
    "true",
    "yes",
}


ENV_AUTO_CONFIG = os.getenv("LOG_AUTO_CONFIG", "true").lower() in {"1", "true", "yes"}
ENV_DEBUG_LLM = os.getenv("DEBUG_LLM", "false").lower() in {"1", "true", "yes"}


# ========= LiteLLM controls =========
_ENABLE_LITELLM_DEBUG = False
if ENV_DEBUG_LLM:
    confirmation = input(
        (
            "\n⚠️ WARNING: You are enabling DEBUG_LLM which may expose sensitive "
            "information like API keys.\nThis should NEVER be enabled in production.\n"
            "Type 'y' to confirm you understand the risks: "
        )
    )
    if confirmation.lower() == "y":
        _ENABLE_LITELLM_DEBUG = True
        litellm.suppress_debug_info = False
        litellm.set_verbose = True  # type: ignore
    else:
        print("DEBUG_LLM disabled due to lack of confirmation")
        litellm.suppress_debug_info = True
        litellm.set_verbose = False  # type: ignore
else:
    litellm.suppress_debug_info = True
    litellm.set_verbose = False  # type: ignore


def disable_logger(name: str, level: int = logging.CRITICAL) -> None:
    """Disable or quiet down a specific logger by name."""
    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.propagate = False


# Quiet chatty third-party loggers
for name in ["litellm", "LiteLLM", "openai"]:
    disable_logger(name, logging.DEBUG if _ENABLE_LITELLM_DEBUG else logging.WARNING)
for name in ["httpcore", "httpx", "libtmux"]:
    disable_logger(name, logging.WARNING)


# ========= SETUP =========
def setup_logging(
    level: int | None = None,
    log_to_file: bool | None = None,
    log_dir: str | None = None,
    fmt: str | None = None,
    when: str | None = None,
    backup_count: int | None = None,
) -> None:
    """Configure the root logger. All child loggers inherit this setup."""
    lvl = ENV_LOG_LEVEL if level is None else level
    to_file = ENV_LOG_TO_FILE if log_to_file is None else log_to_file
    directory = ENV_LOG_DIR if log_dir is None else log_dir
    rotate_when = ENV_ROTATE_WHEN if when is None else when
    keep = ENV_BACKUP_COUNT if backup_count is None else backup_count

    root = logging.getLogger()
    root.setLevel(lvl)
    root.handlers = []  # reset

    if ENV_JSON or IN_CI:
        # JSON console handler
        ch = logging.StreamHandler()
        ch.setLevel(lvl)
        ch.setFormatter(
            JsonFormatter(
                fmt="%(asctime)s %(levelname)s %(name)s "
                "%(filename)s %(lineno)d %(message)s"
            )
        )
        root.addHandler(ch)
    else:
        # Rich console handler
        rich_handler = RichHandler(
            console=Console(stderr=True),
            log_time_format="[%x %H:%M:%S.%f]",
            omit_repeated_times=False,
            rich_tracebacks=ENV_RICH_TRACEBACKS,
        )
        rich_handler.setFormatter(logging.Formatter("%(message)s"))
        rich_handler.setLevel(lvl)
        root.addHandler(rich_handler)

    if to_file:
        os.makedirs(directory, exist_ok=True)
        fh = TimedRotatingFileHandler(
            os.path.join(directory, "app.log"),
            when=rotate_when,
            backupCount=keep,
            encoding="utf-8",
        )
        fh.setLevel(lvl)
        if ENV_JSON:
            fh.setFormatter(
                JsonFormatter(
                    fmt="%(asctime)s %(levelname)s %(name)s "
                    "%(filename)s %(lineno)d %(message)s"
                )
            )
        else:
            log_fmt = (
                fmt
                or "%(asctime)s - %(levelname)s - %(name)s "
                "- %(filename)s:%(lineno)d - %(message)s"
            )
            fh.setFormatter(logging.Formatter(log_fmt))
        root.addHandler(fh)


def get_logger(name: str) -> logging.Logger:
    """Return a logger for the given module name."""
    return logging.getLogger(name)


# Auto-configure if desired
if ENV_AUTO_CONFIG:
    setup_logging()
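A hedged sketch of configuring the removed logger module explicitly instead of relying on its import-time auto-configuration; the level and directory values are illustrative only.

import logging
from openhands.sdk.logger import get_logger, setup_logging

setup_logging(level=logging.DEBUG, log_to_file=True, log_dir="logs")  # overrides env-derived defaults
logger = get_logger(__name__)
logger.debug("file + console logging active")  # file handler rotates per LOG_ROTATE_WHEN

Calling setup_logging again simply resets the root handlers, so an explicit call after the auto-configuration is safe.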
openhands/sdk/mcp/__init__.py
DELETED

@@ -1,20 +0,0 @@

"""MCP (Model Context Protocol) integration for agent-sdk."""

from openhands.sdk.mcp.client import MCPClient
from openhands.sdk.mcp.definition import MCPToolObservation
from openhands.sdk.mcp.tool import (
    MCPTool,
    MCPToolExecutor,
)
from openhands.sdk.mcp.utils import (
    create_mcp_tools,
)


__all__ = [
    "MCPClient",
    "MCPTool",
    "MCPToolObservation",
    "MCPToolExecutor",
    "create_mcp_tools",
]
openhands/sdk/mcp/client.py
DELETED

@@ -1,113 +0,0 @@

"""Minimal sync helpers on top of fastmcp.Client, preserving original behavior."""

import asyncio
import inspect
import threading
from typing import Any, Callable

from fastmcp import Client as AsyncMCPClient


class MCPClient(AsyncMCPClient):
    """
    Behaves exactly like fastmcp.Client (same constructor & async API),
    but owns a background event loop and offers:
      - call_async_from_sync(awaitable_or_fn, *args, timeout=None, **kwargs)
      - call_sync_from_async(fn, *args, **kwargs)  # await this from async code
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._loop: asyncio.AbstractEventLoop | None = None
        self._thread: threading.Thread | None = None
        self._lock = threading.Lock()

    # ---------- loop management ----------

    def _ensure_loop(self) -> asyncio.AbstractEventLoop:
        with self._lock:
            if self._loop is not None:
                return self._loop

            loop = asyncio.new_event_loop()

            def _runner():
                asyncio.set_event_loop(loop)
                loop.run_forever()

            t = threading.Thread(target=_runner, daemon=True)
            t.start()
            while not loop.is_running():
                pass

            self._loop = loop
            self._thread = t
            return loop

    def _shutdown_loop(self) -> None:
        with self._lock:
            loop, t = self._loop, self._thread
            self._loop = None
            self._thread = None

        if loop and loop.is_running():
            try:
                loop.call_soon_threadsafe(loop.stop)
            except RuntimeError:
                pass
        if t and t.is_alive():
            t.join(timeout=1.0)

    # ---------- public helpers ----------

    def call_async_from_sync(
        self,
        awaitable_or_fn: Callable[..., Any] | Any,
        *args,
        timeout: float,
        **kwargs,
    ):
        """
        Run a coroutine or async function on this client's loop from sync code.

        Usage:
            mcp.call_async_from_sync(async_fn, arg1, kw=...)
            mcp.call_async_from_sync(coro)
        """
        if inspect.iscoroutine(awaitable_or_fn):
            coro = awaitable_or_fn
        elif inspect.iscoroutinefunction(awaitable_or_fn):
            coro = awaitable_or_fn(*args, **kwargs)
        else:
            raise TypeError(
                "call_async_from_sync expects a coroutine or async function"
            )

        loop = self._ensure_loop()
        fut = asyncio.run_coroutine_threadsafe(coro, loop)
        return fut.result(timeout)

    async def call_sync_from_async(self, fn: Callable[..., Any], *args, **kwargs):
        """
        Await running a blocking function in the default threadpool from async code.
        """
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, lambda: fn(*args, **kwargs))

    # ---------- optional cleanup ----------

    def sync_close(self):
        # Best-effort: try async close if parent provides it
        aclose = self.close
        if inspect.iscoroutinefunction(aclose):
            try:
                self.call_async_from_sync(aclose, timeout=10.0)
            except Exception:
                pass
        self._shutdown_loop()

    def __del__(self):
        try:
            self.sync_close()
        except Exception:
            pass
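An illustrative sketch of the removed MCPClient sync bridge, not taken from the diff. The server URL is a placeholder, and passing a URL string straight to the fastmcp constructor is an assumption; the helper itself only needs a coroutine to run.

import asyncio
from openhands.sdk.mcp import MCPClient

client = MCPClient("http://localhost:8000/mcp")  # placeholder transport; nothing connects yet

async def ping() -> str:
    # stand-in for real work such as: async with client: await client.list_tools()
    await asyncio.sleep(0.1)
    return "pong"

result = client.call_async_from_sync(ping(), timeout=5.0)  # runs on the client's background loop

The point of the bridge is that synchronous CLI code can drive the async fastmcp API without managing an event loop itself.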
openhands/sdk/mcp/definition.py
DELETED

@@ -1,69 +0,0 @@

"""MCPTool definition and implementation."""

import mcp.types
from pydantic import Field

from openhands.sdk.llm import ImageContent, TextContent
from openhands.sdk.logger import get_logger
from openhands.sdk.tool import (
    ObservationBase,
)


logger = get_logger(__name__)


# NOTE: We don't define MCPToolAction because it
# will be dynamically created from the MCP tool schema.


class MCPToolObservation(ObservationBase):
    """Observation from MCP tool execution."""

    content: list[TextContent | ImageContent] = Field(
        default_factory=list,
        description="Content returned from the MCP tool converted "
        "to LLM Ready TextContent or ImageContent",
    )
    is_error: bool = Field(
        default=False, description="Whether the call resulted in an error"
    )
    tool_name: str = Field(description="Name of the tool that was called")

    @classmethod
    def from_call_tool_result(
        cls, tool_name: str, result: mcp.types.CallToolResult
    ) -> "MCPToolObservation":
        """Create an MCPToolObservation from a CallToolResult."""
        content: list[mcp.types.ContentBlock] = result.content
        convrted_content = []
        for block in content:
            if isinstance(block, mcp.types.TextContent):
                convrted_content.append(TextContent(text=block.text))
            elif isinstance(block, mcp.types.ImageContent):
                convrted_content.append(
                    ImageContent(
                        image_urls=[f"data:{block.mimeType};base64,{block.data}"],
                        # ImageContent is inherited from mcp.types.ImageContent
                        # so we need to pass these fields
                        data=block.data,
                        mimeType=block.mimeType,
                    )
                )
            else:
                logger.warning(
                    f"Unsupported MCP content block type: {type(block)}. Ignoring."
                )
        return cls(
            content=convrted_content,
            is_error=result.isError,
            tool_name=tool_name,
        )

    @property
    def agent_observation(self) -> list[TextContent | ImageContent]:
        """Format the observation for agent display."""
        initial_message = f"[Tool '{self.tool_name}' executed.]\n"
        if self.is_error:
            initial_message += "[An error occurred during execution.]\n"
        return [TextContent(text=initial_message)] + self.content
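Similarly, a hedged sketch of building the removed MCPToolObservation from an MCP CallToolResult; the tool name and text payload are made up, and the CallToolResult/TextContent constructor fields follow the upstream mcp package as assumed here.

import mcp.types
from openhands.sdk.mcp import MCPToolObservation

result = mcp.types.CallToolResult(
    content=[mcp.types.TextContent(type="text", text="42 files found")],
    isError=False,
)
obs = MCPToolObservation.from_call_tool_result("search_files", result)
print(obs.agent_observation)  # leading status line followed by the converted content blocks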