openhands-agent-server 1.8.2__py3-none-any.whl → 1.9.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- openhands/agent_server/api.py +2 -0
- openhands/agent_server/bash_router.py +3 -0
- openhands/agent_server/docker/build.py +29 -6
- openhands/agent_server/env_parser.py +54 -3
- openhands/agent_server/event_service.py +37 -13
- openhands/agent_server/file_router.py +3 -0
- openhands/agent_server/git_router.py +3 -1
- openhands/agent_server/logging_config.py +85 -27
- openhands/agent_server/middleware.py +10 -2
- openhands/agent_server/skills_router.py +181 -0
- openhands/agent_server/skills_service.py +401 -0
- {openhands_agent_server-1.8.2.dist-info → openhands_agent_server-1.9.1.dist-info}/METADATA +5 -1
- {openhands_agent_server-1.8.2.dist-info → openhands_agent_server-1.9.1.dist-info}/RECORD +16 -14
- {openhands_agent_server-1.8.2.dist-info → openhands_agent_server-1.9.1.dist-info}/WHEEL +1 -1
- {openhands_agent_server-1.8.2.dist-info → openhands_agent_server-1.9.1.dist-info}/entry_points.txt +0 -0
- {openhands_agent_server-1.8.2.dist-info → openhands_agent_server-1.9.1.dist-info}/top_level.txt +0 -0
openhands/agent_server/api.py
CHANGED

@@ -28,6 +28,7 @@ from openhands.agent_server.server_details_router import (
     get_server_info,
     server_details_router,
 )
+from openhands.agent_server.skills_router import skills_router
 from openhands.agent_server.sockets import sockets_router
 from openhands.agent_server.tool_preload_service import get_tool_preload_service
 from openhands.agent_server.tool_router import tool_router
@@ -173,6 +174,7 @@ def _add_api_routes(app: FastAPI, config: Config) -> None:
     api_router.include_router(file_router)
     api_router.include_router(vscode_router)
     api_router.include_router(desktop_router)
+    api_router.include_router(skills_router)
     app.include_router(api_router)
     app.include_router(sockets_router)
 
openhands/agent_server/bash_router.py
CHANGED

@@ -21,6 +21,7 @@ from openhands.agent_server.models import (
     BashOutput,
     ExecuteBashRequest,
 )
+from openhands.agent_server.server_details_router import update_last_execution_time
 
 
 bash_router = APIRouter(prefix="/bash", tags=["Bash"])
@@ -84,6 +85,7 @@ async def batch_get_bash_events(
 @bash_router.post("/start_bash_command")
 async def start_bash_command(request: ExecuteBashRequest) -> BashCommand:
     """Execute a bash command in the background"""
+    update_last_execution_time()
     command, _ = await bash_event_service.start_bash_command(request)
     return command
 
@@ -91,6 +93,7 @@ async def start_bash_command(request: ExecuteBashRequest) -> BashCommand:
 @bash_router.post("/execute_bash_command")
 async def execute_bash_command(request: ExecuteBashRequest) -> BashOutput:
     """Execute a bash command and wait for a result"""
+    update_last_execution_time()
     command, task = await bash_event_service.start_bash_command(request)
     await task
     page = await bash_event_service.search_bash_events(command_id__eq=command.id)
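The bash, file, and git routers all gain the same one-line call at the top of each handler, so that any user-triggered work refreshes the server's activity clock. The helper itself is not shown in this diff; a minimal sketch of the pattern it implies, assuming update_last_execution_time simply records a wall-clock timestamp that an idle-timeout check reads later:

    import time

    _last_execution_time = time.time()

    def update_last_execution_time() -> None:
        # Record that the server just performed user-visible work.
        global _last_execution_time
        _last_execution_time = time.time()

    def seconds_since_last_execution() -> float:
        # An idle-reaper can poll this to decide when to shut down.
        return time.time() - _last_execution_time
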
openhands/agent_server/docker/build.py
CHANGED

@@ -204,6 +204,32 @@ def _sanitize_branch(ref: str) -> str:
     return re.sub(r"[^a-zA-Z0-9.-]+", "-", ref).lower()
 
 
+def _truncate_ident(repo: str, tag: str, budget: int) -> str:
+    """
+    Truncate repo+tag to fit budget, prioritizing tag preservation.
+
+    Strategy:
+    1. If both fit: return both
+    2. If tag fits but repo doesn't: truncate repo, keep full tag
+    3. If tag doesn't fit: truncate tag, discard repo
+    4. If no tag: truncate repo
+    """
+    tag_suffix = f"_tag_{tag}" if tag else ""
+    full_ident = repo + tag_suffix
+
+    if len(full_ident) <= budget:
+        return full_ident
+
+    if not tag:
+        return repo[:budget]
+
+    if len(tag_suffix) <= budget:
+        repo_budget = budget - len(tag_suffix)
+        return repo[:repo_budget] + tag_suffix
+
+    return tag_suffix[:budget]
+
+
 def _base_slug(image: str, max_len: int = 64) -> str:
     """
     If the slug is too long, keep the most identifiable parts:
@@ -226,24 +252,21 @@ def _base_slug(image: str, max_len: int = 64) -> str:
 
     # Parse components from the slug form
     if "_tag_" in base_slug:
-        left, tag = base_slug.
+        left, tag = base_slug.rsplit("_tag_", 1)  # Split on last : (rightmost tag)
     else:
         left, tag = base_slug, ""
 
     parts = left.split("_s_") if left else []
     repo = parts[-1] if parts else left  # last path segment is the repo
 
-    # Reconstruct a compact, identifiable core: "<repo>[_tag_<tag>]"
-    ident = repo + (f"_tag_{tag}" if tag else "")
-
     # Fit within budget, reserving space for the digest suffix
     visible_budget = max_len - len(suffix)
     assert visible_budget > 0, (
         f"max_len too small to fit digest suffix with length {len(suffix)}"
     )
-
-    return
+    ident = _truncate_ident(repo, tag, visible_budget)
+    return ident + suffix
 
 
 def _git_info() -> tuple[str, str]:
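A few concrete cases of the new truncation strategy, one per branch (inputs are illustrative):

    # Both parts fit within the budget: returned unchanged.
    assert _truncate_ident("myrepo", "v1", 20) == "myrepo_tag_v1"
    # Tag fits but repo does not: repo is cut, full tag kept.
    assert _truncate_ident("a-very-long-repository", "v1", 14) == "a-very-_tag_v1"
    # Even the tag suffix exceeds the budget: repo dropped, tag cut.
    assert _truncate_ident("repo", "extremely-long-tag-name", 10) == "_tag_extre"
    # No tag at all: plain repo truncation.
    assert _truncate_ident("a-very-long-repository", "", 6) == "a-very"
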
openhands/agent_server/env_parser.py
CHANGED

@@ -2,6 +2,7 @@
 We couldn't use pydantic-settings for this as we need complex nested types
 and polymorphism."""
 
+import importlib
 import inspect
 import json
 import os
@@ -277,15 +278,65 @@ class DiscriminatedUnionEnvParser(EnvParser):
 
     def from_env(self, key: str) -> JsonType:
         kind = os.environ.get(f"{key}_KIND", MISSING)
+        kind_missing = False
         if kind is MISSING:
-
-
+            kind_missing = True
+            # If there are other fields and there is exactly one kind, use it directly
+            if len(self.parsers) == 1:
+                kind = next(iter(self.parsers.keys()))
+            else:
+                return MISSING
+        # Type narrowing: kind is str here (from os.environ.get or dict keys)
+        kind = cast(str, kind)
+
+        # If kind contains dots, treat it as a full class name
+        if "." in kind:
+            kind = self._import_and_register_class(kind)
+
+        # Intentionally raise KeyError for invalid KIND - typos should fail early
         parser = self.parsers[kind]
         parser_result = parser.from_env(key)
-
+
+        # A kind was defined without other fields
+        if parser_result is MISSING:
+            # If the kind was not defined, the entry is MISSING
+            if kind_missing:
+                return MISSING
+            # Only a kind was defined
+            parser_result = {}
+
+        # Type narrowing: discriminated union parsers always return dicts
+        parser_result = cast(dict, parser_result)
         parser_result["kind"] = kind
         return parser_result
 
+    def _import_and_register_class(self, full_class_name: str) -> str:
+        """Import a class from its full module path and register its parser.
+
+        Args:
+            full_class_name: Full class path (e.g., 'mymodule.submodule.MyClass')
+
+        Returns:
+            The unqualified class name (e.g., 'MyClass')
+        """
+        parts = full_class_name.rsplit(".", 1)
+        module_name = parts[0]
+        class_name = parts[1]
+
+        # If class already registered, just return the name
+        if class_name in self.parsers:
+            return class_name
+
+        # Import the module and get the class
+        module = importlib.import_module(module_name)
+        cls = getattr(module, class_name)
+
+        # Create and register the parser for this class
+        parser = get_env_parser(cls, _get_default_parsers())
+        self.parsers[class_name] = parser
+
+        return class_name
+
     def to_env(self, key: str, value: Any, output: IO):
         parser = self.parsers[value.kind]
         parser.to_env(key, value, output)
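In practice the dotted-KIND path means a deployment can point the parser at a class that was never pre-registered. A hedged illustration with made-up names (the WORKSPACE prefix, module, and class do not come from this diff):

    import os

    # Hypothetical environment:
    os.environ["WORKSPACE_KIND"] = "mycompany.workspaces.CustomWorkspace"
    os.environ["WORKSPACE_HOST"] = "example.com"

    # from_env("WORKSPACE") would now import mycompany.workspaces, register
    # an env parser for CustomWorkspace, and return something like
    #     {"kind": "CustomWorkspace", "host": "example.com"}
    # If only WORKSPACE_KIND is set, the result is {"kind": "CustomWorkspace"};
    # if no KIND is set and exactly one kind is registered, that kind is
    # assumed; otherwise the entry resolves to MISSING.
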
openhands/agent_server/event_service.py
CHANGED

@@ -311,7 +311,21 @@ class EventService:
         with self._conversation.state as state:
             run = state.execution_status != ConversationExecutionStatus.RUNNING
         if run:
-
+            conversation = self._conversation
+
+            async def _run_with_error_handling():
+                try:
+                    await loop.run_in_executor(None, conversation.run)
+                except Exception:
+                    logger.exception("Error during conversation run from send_message")
+
+            # Fire-and-forget: This task is intentionally not tracked because
+            # send_message() is designed to return immediately after queuing the
+            # message. The conversation run happens in the background and any
+            # errors are logged. Unlike the run() method which is explicitly
+            # awaited, this pattern allows clients to send messages without
+            # blocking on the full conversation execution.
+            loop.create_task(_run_with_error_handling())
 
     async def subscribe_to_events(self, subscriber: Subscriber[Event]) -> UUID:
         subscriber_id = self._pub_sub.subscribe(subscriber)
@@ -319,20 +333,23 @@ class EventService:
         # Send current state to the new subscriber immediately
         if self._conversation:
             state = self._conversation._state
+            # Create state snapshot while holding the lock to ensure consistency.
+            # ConversationStateUpdateEvent inherits from Event which has frozen=True
+            # in its model_config, making the snapshot immutable after creation.
             with state:
-                # Create state update event with current state information
                 state_update_event = (
                     ConversationStateUpdateEvent.from_conversation_state(state)
                 )
 
-
-
-
-
-
-
-
-
+            # Send state update outside the lock - the event is frozen (immutable),
+            # so we don't need to hold the lock during the async send operation.
+            # This prevents potential deadlocks between the sync FIFOLock and async I/O.
+            try:
+                await subscriber(state_update_event)
+            except Exception as e:
+                logger.error(
+                    f"Error sending initial state to subscriber {subscriber_id}: {e}"
+                )
 
         return subscriber_id
 
@@ -497,8 +514,8 @@ class EventService:
         async def _run_and_publish():
             try:
                 await loop.run_in_executor(None, conversation.run)
-            except Exception
-                logger.
+            except Exception:
+                logger.exception("Error during conversation run")
             finally:
                 # Clear task reference and publish state update
                 self._run_task = None
@@ -630,11 +647,18 @@ class EventService:
             return
 
         state = self._conversation._state
+        # Create state snapshot while holding the lock to ensure consistency.
+        # ConversationStateUpdateEvent inherits from Event which has frozen=True
+        # in its model_config, making the snapshot immutable after creation.
        with state:
             state_update_event = ConversationStateUpdateEvent.from_conversation_state(
                 state
             )
-
+        # Publish outside the lock - the event is frozen (immutable).
+        # Note: _pub_sub iterates through subscribers sequentially. If any subscriber
+        # is slow, it will delay subsequent subscribers. For high-throughput scenarios,
+        # consider using asyncio.gather() for concurrent notification in the future.
+        await self._pub_sub(state_update_event)
 
     async def __aenter__(self):
         await self.start()
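Both hunks lean on one property: the state snapshot is a frozen pydantic model, so it can safely be handed to async subscribers after the lock is released. A minimal sketch of that guarantee with a stand-in model (not the SDK's actual Event class):

    from pydantic import BaseModel, ConfigDict

    class StateSnapshot(BaseModel):
        # frozen=True mirrors the model_config the new comments point to.
        model_config = ConfigDict(frozen=True)
        execution_status: str

    snap = StateSnapshot(execution_status="FINISHED")
    # snap.execution_status = "RUNNING"  # would raise: instance is frozen
    # Because the snapshot cannot change, it is safe to release the sync
    # lock before awaiting subscriber(...) or self._pub_sub(...).
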
openhands/agent_server/file_router.py
CHANGED

@@ -17,6 +17,7 @@ from openhands.agent_server.bash_service import get_default_bash_event_service
 from openhands.agent_server.config import get_default_config
 from openhands.agent_server.conversation_service import get_default_conversation_service
 from openhands.agent_server.models import ExecuteBashRequest, Success
+from openhands.agent_server.server_details_router import update_last_execution_time
 from openhands.sdk.logger import get_logger
 
 
@@ -33,6 +34,7 @@ async def upload_file(
     file: Annotated[UploadFile, File(...)],
 ) -> Success:
     """Upload a file to the workspace."""
+    update_last_execution_time()
     logger.info(f"Uploading file: {path}")
     try:
         target_path = Path(path)
@@ -66,6 +68,7 @@ async def download_file(
     path: Annotated[str, FastApiPath(description="Absolute file path.")],
 ) -> FileResponse:
     """Download a file from the workspace."""
+    update_last_execution_time()
     logger.info(f"Downloading file: {path}")
     try:
         target_path = Path(path)
openhands/agent_server/git_router.py
CHANGED

@@ -6,6 +6,7 @@ from pathlib import Path
 
 from fastapi import APIRouter
 
+from openhands.agent_server.server_details_router import update_last_execution_time
 from openhands.sdk.git.git_changes import get_git_changes
 from openhands.sdk.git.git_diff import get_git_diff
 from openhands.sdk.git.models import GitChange, GitDiff
@@ -19,16 +20,17 @@ logger = logging.getLogger(__name__)
 async def git_changes(
     path: Path,
 ) -> list[GitChange]:
+    update_last_execution_time()
     loop = asyncio.get_running_loop()
     changes = await loop.run_in_executor(None, get_git_changes, path)
     return changes
 
 
-# bash event routes
 @git_router.get("/diff/{path:path}")
 async def git_diff(
     path: Path,
 ) -> GitDiff:
+    update_last_execution_time()
     loop = asyncio.get_running_loop()
     changes = await loop.run_in_executor(None, get_git_diff, path)
     return changes
openhands/agent_server/logging_config.py
CHANGED

@@ -3,7 +3,46 @@
 import logging
 from typing import Any
 
-from
+from pythonjsonlogger.json import JsonFormatter
+
+from openhands.sdk.logger import ENV_JSON, ENV_LOG_LEVEL, IN_CI
+
+
+class UvicornAccessJsonFormatter(JsonFormatter):
+    """JSON formatter for uvicorn access logs that extracts HTTP fields.
+
+    Uvicorn access logs pass structured data in record.args as a tuple:
+    (client_addr, method, full_path, http_version, status_code)
+
+    This formatter extracts these into separate JSON fields for better
+    querying and analysis in log aggregation systems like Datadog.
+    """
+
+    def add_fields(
+        self,
+        log_data: dict[str, Any],
+        record: logging.LogRecord,
+        message_dict: dict[str, Any],
+    ) -> None:
+        super().add_fields(log_data, record, message_dict)
+
+        # Extract HTTP fields from uvicorn access log args
+        # record.args is a tuple for uvicorn access logs:
+        # (client_addr, method, full_path, http_version, status_code)
+        args = record.args
+        if isinstance(args, tuple) and len(args) >= 5:
+            client_addr, method, full_path, http_version, status_code = args[:5]
+            log_data["http.client_ip"] = client_addr
+            log_data["http.method"] = method
+            log_data["http.url"] = full_path
+            log_data["http.version"] = http_version
+            # status_code from uvicorn is typically an int, but handle edge cases
+            if isinstance(status_code, int):
+                log_data["http.status_code"] = status_code
+            elif isinstance(status_code, str) and status_code.isdigit():
+                log_data["http.status_code"] = int(status_code)
+            else:
+                log_data["http.status_code"] = status_code
 
 
 def get_uvicorn_logging_config() -> dict[str, Any]:
@@ -13,42 +52,61 @@ def get_uvicorn_logging_config() -> dict[str, Any]:
     This function creates a logging configuration that:
     1. Preserves the SDK's root logger configuration
     2. Routes uvicorn logs through the same handlers
-    3.
-    4.
+    3. Uses JSON formatter for access logs when LOG_JSON=true or in CI
+    4. Extracts HTTP fields into structured JSON attributes
     """
+    use_json = ENV_JSON or IN_CI
+    log_level = logging.getLevelName(ENV_LOG_LEVEL)
+
     # Base configuration
-
-    # So we set it to False but configure loggers to propagate to root
-    config = {
+    config: dict[str, Any] = {
         "version": 1,
         "disable_existing_loggers": False,
-        "incremental": False,
+        "incremental": False,
         "formatters": {},
         "handlers": {},
-        "loggers": {
+        "loggers": {
+            # Common logger configurations - propagate to root
+            "uvicorn": {
+                "handlers": [],
+                "level": log_level,
+                "propagate": True,
+            },
+            "uvicorn.error": {
+                "handlers": [],
+                "level": log_level,
+                "propagate": True,
+            },
+        },
     }
 
-
-
-
-
-        "
-
-
-
-
-        "
-        "
-
-
-            # Access logger
-            "uvicorn.access"
+    if use_json:
+        # Define JSON formatter for access logs with HTTP field extraction
+        config["formatters"]["access_json"] = {
+            "()": UvicornAccessJsonFormatter,
+            "fmt": "%(asctime)s %(levelname)s %(name)s %(message)s",
+        }
+
+        # Define handler for access logs
+        config["handlers"]["access_json"] = {
+            "class": "logging.StreamHandler",
+            "formatter": "access_json",
+            "stream": "ext://sys.stderr",
+        }
+
+        # Access logger uses dedicated JSON handler with HTTP field extraction
+        config["loggers"]["uvicorn.access"] = {
+            "handlers": ["access_json"],
+            "level": log_level,
+            "propagate": False,  # Don't double-log
+        }
+    else:
+        # Non-JSON mode: propagate access logs to root (uses Rich handler)
+        config["loggers"]["uvicorn.access"] = {
             "handlers": [],
-            "level":
+            "level": log_level,
             "propagate": True,
-        }
-    }
+        }
 
     return config
 
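One way to apply the returned dictConfig is to hand it straight to uvicorn's log_config parameter; whether the repo wires it up exactly this way is not shown in the diff:

    import uvicorn
    from openhands.agent_server.logging_config import get_uvicorn_logging_config

    # The "openhands.agent_server.api:app" app path is an assumption here.
    uvicorn.run(
        "openhands.agent_server.api:app",
        log_config=get_uvicorn_logging_config(),
    )

With LOG_JSON=true (or in CI), each access log line then comes out as a single JSON object carrying http.method, http.url, http.status_code, and the other extracted fields.
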
openhands/agent_server/middleware.py
CHANGED

@@ -1,3 +1,4 @@
+import os
 from urllib.parse import urlparse
 
 from fastapi.middleware.cors import CORSMiddleware
@@ -5,8 +6,10 @@ from starlette.types import ASGIApp
 
 
 class LocalhostCORSMiddleware(CORSMiddleware):
-    """Custom CORS middleware that allows any request from localhost/127.0.0.1 domains
-
+    """Custom CORS middleware that allows any request from localhost/127.0.0.1 domains.
+
+    Also allows the DOCKER_HOST_ADDR IP, while using standard CORS rules for
+    other origins.
     """
 
     def __init__(self, app: ASGIApp, allow_origins: list[str]) -> None:
@@ -27,6 +30,11 @@ class LocalhostCORSMiddleware(CORSMiddleware):
         if hostname in ["localhost", "127.0.0.1"]:
             return True
 
+        # Also allow DOCKER_HOST_ADDR if set (for remote browser access)
+        docker_host_addr = os.environ.get("DOCKER_HOST_ADDR")
+        if docker_host_addr and hostname == docker_host_addr:
+            return True
+
         # For missing origin or other origins, use the parent class's logic
         result: bool = super().is_allowed_origin(origin)
         return result
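The effect of the new branch, with a made-up address for illustration:

    import os

    # Hypothetical value; real deployments set this outside the process.
    os.environ["DOCKER_HOST_ADDR"] = "192.168.65.2"

    # An Origin of "http://192.168.65.2:3000" now passes is_allowed_origin(),
    # while other non-localhost origins still use CORSMiddleware's rules.
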
openhands/agent_server/skills_router.py
ADDED (new file, 181 lines)

"""Skills router for OpenHands Agent Server.

This module defines the HTTP API endpoints for skill operations.
Business logic is delegated to skills_service.py.
"""

from typing import Literal

from fastapi import APIRouter
from pydantic import BaseModel, Field

from openhands.agent_server.skills_service import (
    ExposedUrlData,
    load_all_skills,
    sync_public_skills,
)


skills_router = APIRouter(prefix="/skills", tags=["Skills"])


class ExposedUrl(BaseModel):
    """Represents an exposed URL from the sandbox."""

    name: str
    url: str
    port: int


class OrgConfig(BaseModel):
    """Configuration for loading organization-level skills."""

    repository: str = Field(description="Selected repository (e.g., 'owner/repo')")
    provider: str = Field(
        description="Git provider type: github, gitlab, azure, bitbucket"
    )
    org_repo_url: str = Field(
        description="Pre-authenticated Git URL for the organization repository. "
        "Contains sensitive credentials - handle with care and avoid logging."
    )
    org_name: str = Field(description="Organization name")


class SandboxConfig(BaseModel):
    """Configuration for loading sandbox-specific skills."""

    exposed_urls: list[ExposedUrl] = Field(
        default_factory=list,
        description="List of exposed URLs from the sandbox",
    )


class SkillsRequest(BaseModel):
    """Request body for loading skills."""

    load_public: bool = Field(
        default=True, description="Load public skills from OpenHands/skills repo"
    )
    load_user: bool = Field(
        default=True, description="Load user skills from ~/.openhands/skills/"
    )
    load_project: bool = Field(
        default=True, description="Load project skills from workspace"
    )
    load_org: bool = Field(default=True, description="Load organization-level skills")
    project_dir: str | None = Field(
        default=None, description="Workspace directory path for project skills"
    )
    org_config: OrgConfig | None = Field(
        default=None, description="Organization skills configuration"
    )
    sandbox_config: SandboxConfig | None = Field(
        default=None, description="Sandbox skills configuration"
    )


class SkillInfo(BaseModel):
    """Skill information returned by the API."""

    name: str
    type: Literal["repo", "knowledge", "agentskills"]
    content: str
    triggers: list[str] = Field(default_factory=list)
    source: str | None = None
    description: str | None = None
    is_agentskills_format: bool = False


class SkillsResponse(BaseModel):
    """Response containing all available skills."""

    skills: list[SkillInfo]
    sources: dict[str, int] = Field(
        default_factory=dict,
        description="Count of skills loaded from each source",
    )


class SyncResponse(BaseModel):
    """Response from skill sync operation."""

    status: Literal["success", "error"]
    message: str


@skills_router.post("", response_model=SkillsResponse)
def get_skills(request: SkillsRequest) -> SkillsResponse:
    """Load and merge skills from all configured sources.

    Skills are loaded from multiple sources and merged with the following
    precedence (later overrides earlier for duplicate names):
    1. Sandbox skills (lowest) - Exposed URLs from sandbox
    2. Public skills - From GitHub OpenHands/skills repository
    3. User skills - From ~/.openhands/skills/
    4. Organization skills - From {org}/.openhands or equivalent
    5. Project skills (highest) - From {workspace}/.openhands/skills/

    Args:
        request: SkillsRequest containing configuration for which sources to load.

    Returns:
        SkillsResponse containing merged skills and source counts.
    """
    # Convert Pydantic models to service data types
    sandbox_urls = None
    if request.sandbox_config and request.sandbox_config.exposed_urls:
        sandbox_urls = [
            ExposedUrlData(name=url.name, url=url.url, port=url.port)
            for url in request.sandbox_config.exposed_urls
        ]

    org_repo_url = None
    org_name = None
    if request.org_config:
        org_repo_url = request.org_config.org_repo_url
        org_name = request.org_config.org_name

    # Call the service
    result = load_all_skills(
        load_public=request.load_public,
        load_user=request.load_user,
        load_project=request.load_project,
        load_org=request.load_org,
        project_dir=request.project_dir,
        org_repo_url=org_repo_url,
        org_name=org_name,
        sandbox_exposed_urls=sandbox_urls,
    )

    # Convert Skill objects to SkillInfo for response
    skills_info = [
        SkillInfo(
            name=info.name,
            type=info.type,
            content=info.content,
            triggers=info.triggers,
            source=info.source,
            description=info.description,
            is_agentskills_format=info.is_agentskills_format,
        )
        for info in (skill.to_skill_info() for skill in result.skills)
    ]

    return SkillsResponse(skills=skills_info, sources=result.sources)


@skills_router.post("/sync", response_model=SyncResponse)
def sync_skills() -> SyncResponse:
    """Force refresh of public skills from GitHub repository.

    This triggers a git pull on the cached skills repository to get
    the latest skills from the OpenHands/skills repository.

    Returns:
        SyncResponse indicating success or failure.
    """
    success, message = sync_public_skills()
    return SyncResponse(
        status="success" if success else "error",
        message=message,
    )
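An example request against the new endpoint; the host, port, and any route prefix in front of /skills are assumptions, since the diff does not show where api_router is mounted:

    import httpx

    resp = httpx.post(
        "http://localhost:8000/skills",  # base URL and prefix are illustrative
        json={
            "load_public": True,
            "load_user": True,
            "load_project": True,
            "load_org": False,
            "project_dir": "/workspace/my-project",
        },
    )
    payload = resp.json()
    print(payload["sources"])                      # e.g. {"sandbox": 0, "public": 12, ...}
    print([s["name"] for s in payload["skills"]])
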
openhands/agent_server/skills_service.py
ADDED (new file, 401 lines)

"""Skills service for OpenHands Agent Server.

This module contains the business logic for skill loading and management,
keeping the router clean and focused on HTTP concerns.

Skill Sources:
- Public skills: GitHub OpenHands/skills repository
- User skills: ~/.openhands/skills/ and ~/.openhands/microagents/
- Project skills: {workspace}/.openhands/skills/, .cursorrules, agents.md
- Organization skills: {org}/.openhands or {org}/openhands-config
- Sandbox skills: Exposed URLs from sandbox environment

Precedence (later overrides earlier):
    sandbox < public < user < org < project
"""

import os
import shutil
import subprocess
import tempfile
from dataclasses import dataclass
from pathlib import Path

from openhands.sdk.context.skills import (
    Skill,
    load_project_skills,
    load_public_skills,
    load_user_skills,
)
from openhands.sdk.context.skills.skill import (
    PUBLIC_SKILLS_BRANCH,
    PUBLIC_SKILLS_REPO,
    load_skills_from_dir,
)
from openhands.sdk.context.skills.utils import (
    get_skills_cache_dir,
    update_skills_repository,
)
from openhands.sdk.logger import get_logger


logger = get_logger(__name__)


# Content template for sandbox work hosts skill
WORK_HOSTS_SKILL_CONTENT = (
    "The user has access to the following hosts for accessing "
    "a web application, each of which has a corresponding port:\n{hosts}"
)

# Prefix for sandbox URLs that should be exposed as work_hosts skill.
# URLs with names starting with this prefix represent web applications
# or services running in the sandbox that the agent should be aware of.
SANDBOX_WORKER_URL_PREFIX = "WORKER_"


@dataclass
class ExposedUrlData:
    """Internal representation of an exposed URL from the sandbox."""

    name: str
    url: str
    port: int


@dataclass
class SkillLoadResult:
    """Result of loading skills from all sources."""

    skills: list[Skill]
    sources: dict[str, int]


def load_org_skills_from_url(
    org_repo_url: str,
    org_name: str,
    working_dir: str | Path | None = None,
) -> list[Skill]:
    """Load skills from an organization repository.

    This function clones an organization-level skills repository to a temporary
    directory, loads skills from the skills/ and microagents/ directories, and
    then cleans up the temporary directory.

    The org_repo_url should be a pre-authenticated Git URL (e.g., containing
    credentials or tokens) as provided by the app-server.

    Note:
        This is a blocking I/O operation that may take up to 120 seconds due to
        the git clone timeout. When called from FastAPI endpoints defined with
        `def` (not `async def`), FastAPI automatically runs this in a thread
        pool to avoid blocking the event loop. Do not call this function
        directly from async code without wrapping it in asyncio.to_thread().

    Args:
        org_repo_url: Pre-authenticated Git URL for the organization repository.
            This should be a full Git URL that includes authentication.
        org_name: Name of the organization (used for temp directory naming).
        working_dir: Optional working directory for git operations. If None,
            uses a subdirectory of the system temp directory.

    Returns:
        List of Skill objects loaded from the organization repository.
        Returns empty list if the repository doesn't exist or loading fails.
    """
    all_skills: list[Skill] = []

    # Determine the temporary directory for cloning
    if working_dir:
        base_dir = Path(working_dir) if isinstance(working_dir, str) else working_dir
        temp_dir = base_dir / f"_org_skills_{org_name}"
    else:
        temp_dir = Path(tempfile.gettempdir()) / f"openhands_org_skills_{org_name}"

    try:
        # Clean up any existing temp directory
        if temp_dir.exists():
            shutil.rmtree(temp_dir)

        # Clone the organization repository (shallow clone for efficiency)
        logger.info(f"Cloning organization skills repository for {org_name}")
        try:
            subprocess.run(
                [
                    "git",
                    "clone",
                    "--depth",
                    "1",
                    org_repo_url,
                    str(temp_dir),
                ],
                check=True,
                capture_output=True,
                timeout=120,
                env={**os.environ, "GIT_TERMINAL_PROMPT": "0"},
            )
        except subprocess.CalledProcessError:
            # Repository doesn't exist or access denied - this is expected.
            # Note: We intentionally don't log stderr as it may contain credentials.
            logger.debug(
                f"Organization repository not found or access denied for {org_name}"
            )
            return all_skills
        except subprocess.TimeoutExpired:
            logger.warning(
                f"Git clone timed out for organization repository {org_name}"
            )
            return all_skills

        logger.debug(f"Successfully cloned org repository to {temp_dir}")

        # Load skills from skills/ directory (preferred)
        skills_dir = temp_dir / "skills"
        if skills_dir.exists():
            try:
                repo_skills, knowledge_skills, agent_skills = load_skills_from_dir(
                    skills_dir
                )
                for skills_dict in [repo_skills, knowledge_skills, agent_skills]:
                    all_skills.extend(skills_dict.values())
                logger.debug(
                    f"Loaded {len(all_skills)} skills from org skills/ directory"
                )
            except Exception as e:
                logger.warning(f"Failed to load skills from {skills_dir}: {e}")

        # Load skills from microagents/ directory (legacy support)
        microagents_dir = temp_dir / "microagents"
        if microagents_dir.exists():
            seen_names = {s.name for s in all_skills}
            try:
                repo_skills, knowledge_skills, agent_skills = load_skills_from_dir(
                    microagents_dir
                )
                for skills_dict in [repo_skills, knowledge_skills, agent_skills]:
                    for name, skill in skills_dict.items():
                        if name not in seen_names:
                            all_skills.append(skill)
                            seen_names.add(name)
                        else:
                            logger.debug(
                                f"Skipping duplicate org skill '{name}' "
                                "from microagents/"
                            )
            except Exception as e:
                logger.warning(f"Failed to load skills from {microagents_dir}: {e}")

        logger.info(
            f"Loaded {len(all_skills)} organization skills for {org_name}: "
            f"{[s.name for s in all_skills]}"
        )

    except Exception as e:
        logger.warning(f"Failed to load organization skills for {org_name}: {e}")

    finally:
        # Clean up the temporary directory
        if temp_dir.exists():
            try:
                shutil.rmtree(temp_dir)
                logger.debug(f"Cleaned up temp directory {temp_dir}")
            except Exception as e:
                logger.warning(f"Failed to clean up temp directory {temp_dir}: {e}")

    return all_skills


def create_sandbox_skill(
    exposed_urls: list[ExposedUrlData],
) -> Skill | None:
    """Create a skill from sandbox exposed URLs.

    This function creates a skill that informs the agent about web applications
    and services available in the sandbox environment via exposed ports/URLs.

    Only URLs with names starting with SANDBOX_WORKER_URL_PREFIX are included,
    as these represent web applications the agent should be aware of.

    Args:
        exposed_urls: List of ExposedUrlData objects containing name, url, and port.

    Returns:
        A Skill object with work_hosts content if there are matching URLs,
        or None if no relevant URLs are provided.
    """
    if not exposed_urls:
        return None

    # Filter for URLs with the worker prefix
    worker_urls = [
        url for url in exposed_urls if url.name.startswith(SANDBOX_WORKER_URL_PREFIX)
    ]

    if not worker_urls:
        return None

    # Build the hosts content
    hosts_lines = []
    for url_info in worker_urls:
        hosts_lines.append(f"* {url_info.url} (port {url_info.port})")

    hosts_content = "\n".join(hosts_lines)
    content = WORK_HOSTS_SKILL_CONTENT.format(hosts=hosts_content)

    return Skill(
        name="work_hosts",
        content=content,
        trigger=None,  # Always active
        source=None,  # Programmatically generated
    )


def merge_skills(skill_lists: list[list[Skill]]) -> list[Skill]:
    """Merge multiple skill lists with precedence.

    Later lists override earlier lists for duplicate names.

    Args:
        skill_lists: List of skill lists to merge in order of precedence.

    Returns:
        Merged list of skills with duplicates resolved.
    """
    skills_by_name: dict[str, Skill] = {}

    for skill_list in skill_lists:
        for skill in skill_list:
            if skill.name in skills_by_name:
                logger.info(
                    f"Overriding skill '{skill.name}' from earlier source "
                    "with later source"
                )
            skills_by_name[skill.name] = skill

    return list(skills_by_name.values())


def load_all_skills(
    load_public: bool = True,
    load_user: bool = True,
    load_project: bool = True,
    load_org: bool = True,
    project_dir: str | None = None,
    org_repo_url: str | None = None,
    org_name: str | None = None,
    sandbox_exposed_urls: list[ExposedUrlData] | None = None,
) -> SkillLoadResult:
    """Load and merge skills from all configured sources.

    Skills are loaded from multiple sources and merged with the following
    precedence (later overrides earlier for duplicate names):
    1. Sandbox skills (lowest) - Exposed URLs from sandbox
    2. Public skills - From GitHub OpenHands/skills repository
    3. User skills - From ~/.openhands/skills/
    4. Organization skills - From {org}/.openhands or equivalent
    5. Project skills (highest) - From {workspace}/.openhands/skills/

    Args:
        load_public: Whether to load public skills from OpenHands/skills repo.
        load_user: Whether to load user skills from ~/.openhands/skills/.
        load_project: Whether to load project skills from workspace.
        load_org: Whether to load organization-level skills.
        project_dir: Workspace directory path for project skills.
        org_repo_url: Pre-authenticated Git URL for org skills.
        org_name: Organization name for org skills.
        sandbox_exposed_urls: List of exposed URLs from sandbox.

    Returns:
        SkillLoadResult containing merged skills and source counts.
    """
    sources: dict[str, int] = {}
    skill_lists: list[list[Skill]] = []

    # 1. Load sandbox skills (lowest precedence)
    sandbox_skills: list[Skill] = []
    if sandbox_exposed_urls:
        sandbox_skill = create_sandbox_skill(sandbox_exposed_urls)
        if sandbox_skill:
            sandbox_skills.append(sandbox_skill)
    sources["sandbox"] = len(sandbox_skills)
    skill_lists.append(sandbox_skills)

    # 2. Load public skills
    public_skills: list[Skill] = []
    if load_public:
        try:
            public_skills = load_public_skills()
            logger.info(f"Loaded {len(public_skills)} public skills")
        except Exception as e:
            logger.warning(f"Failed to load public skills: {e}")
    sources["public"] = len(public_skills)
    skill_lists.append(public_skills)

    # 3. Load user skills
    user_skills: list[Skill] = []
    if load_user:
        try:
            user_skills = load_user_skills()
            logger.info(f"Loaded {len(user_skills)} user skills")
        except Exception as e:
            logger.warning(f"Failed to load user skills: {e}")
    sources["user"] = len(user_skills)
    skill_lists.append(user_skills)

    # 4. Load organization skills
    org_skills: list[Skill] = []
    if load_org and org_repo_url and org_name:
        try:
            org_skills = load_org_skills_from_url(
                org_repo_url=org_repo_url,
                org_name=org_name,
            )
            logger.info(f"Loaded {len(org_skills)} organization skills")
        except Exception as e:
            logger.warning(f"Failed to load organization skills: {e}")
    sources["org"] = len(org_skills)
    skill_lists.append(org_skills)

    # 5. Load project skills (highest precedence)
    project_skills: list[Skill] = []
    if load_project and project_dir:
        try:
            project_skills = load_project_skills(project_dir)
            logger.info(f"Loaded {len(project_skills)} project skills")
        except Exception as e:
            logger.warning(f"Failed to load project skills: {e}")
    sources["project"] = len(project_skills)
    skill_lists.append(project_skills)

    # Merge all skills with precedence
    all_skills = merge_skills(skill_lists)

    logger.info(
        f"Returning {len(all_skills)} total skills: {[s.name for s in all_skills]}"
    )

    return SkillLoadResult(skills=all_skills, sources=sources)


def sync_public_skills() -> tuple[bool, str]:
    """Force refresh of public skills from GitHub repository.

    This triggers a git pull on the cached skills repository to get
    the latest skills from the OpenHands/skills repository.

    Returns:
        Tuple of (success: bool, message: str).
    """
    try:
        cache_dir = get_skills_cache_dir()
        result = update_skills_repository(
            PUBLIC_SKILLS_REPO, PUBLIC_SKILLS_BRANCH, cache_dir
        )

        if result:
            return (True, "Skills repository synced successfully")
        else:
            return (False, "Failed to sync skills repository")
    except Exception as e:
        logger.warning(f"Failed to sync skills repository: {e}")
        return (False, f"Sync failed: {str(e)}")
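The precedence rule reduces to last-writer-wins keyed on skill name. A small check of merge_skills, constructing Skill with the same keyword arguments create_sandbox_skill uses (the real model may accept more fields):

    user = [Skill(name="deploy", content="user version", trigger=None, source=None)]
    project = [Skill(name="deploy", content="project version", trigger=None, source=None)]

    merged = merge_skills([user, project])
    assert len(merged) == 1
    assert merged[0].content == "project version"  # later list wins
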
{openhands_agent_server-1.8.2.dist-info → openhands_agent_server-1.9.1.dist-info}/METADATA
CHANGED

@@ -1,7 +1,11 @@
 Metadata-Version: 2.4
 Name: openhands-agent-server
-Version: 1.8.2
+Version: 1.9.1
 Summary: OpenHands Agent Server - REST/WebSocket interface for OpenHands AI Agent
+Project-URL: Source, https://github.com/OpenHands/software-agent-sdk
+Project-URL: Homepage, https://github.com/OpenHands/software-agent-sdk
+Project-URL: Documentation, https://docs.openhands.dev/sdk
+Project-URL: Bug Tracker, https://github.com/OpenHands/software-agent-sdk/issues
 Requires-Python: >=3.12
 Requires-Dist: aiosqlite>=0.19
 Requires-Dist: alembic>=1.13
{openhands_agent_server-1.8.2.dist-info → openhands_agent_server-1.9.1.dist-info}/RECORD
CHANGED

@@ -1,7 +1,7 @@
 openhands/agent_server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openhands/agent_server/__main__.py,sha256=QCdBRWVV9gNtPwRwYieQisvKsmJljjJ8f293RhtHl_w,3642
-openhands/agent_server/api.py,sha256=
-openhands/agent_server/bash_router.py,sha256=
+openhands/agent_server/api.py,sha256=LqU4BWXc4VQ29sIiABZykznSMJ_dUQEOHixxeLBTuGY,12009
+openhands/agent_server/bash_router.py,sha256=9GRsLeVZgqR1XeayJvBBycVqfg5O-Fye2ek75kGCuno,3353
 openhands/agent_server/bash_service.py,sha256=QgTSyQoxlk52BnZuo35xlX7u7-3xs5BvvaKU3AEva_w,14083
 openhands/agent_server/config.py,sha256=EKxVV0QyD3KLLtFxNuUextxJplLW_zZ32nPFhli6ozA,6403
 openhands/agent_server/conversation_router.py,sha256=lz-dnfPXrVBiGZ9GhYqfteCWJ4pq7AcnWqxsKWwCfc0,11178
@@ -9,18 +9,20 @@ openhands/agent_server/conversation_service.py,sha256=Oj8IBSY9ZYbduOOBatd3I0H_kS
 openhands/agent_server/dependencies.py,sha256=H3zyOc8uthpXseB3E7rWNccKIj7PlyfcgCYwFvmFq4c,2629
 openhands/agent_server/desktop_router.py,sha256=OaCmevO33eUo3jTwiXBmQ3uT3ONu4-tqgBfYpZWrHSA,1349
 openhands/agent_server/desktop_service.py,sha256=iCwQJXK4DvGuBXKOQ1oko60wXkf_pYHCubOzBsd2k60,7415
-openhands/agent_server/env_parser.py,sha256=
+openhands/agent_server/env_parser.py,sha256=6b4-Iegq82crMbwjo1w4C_nu5B0Wm0xysrhGhfiyMF8,16770
 openhands/agent_server/event_router.py,sha256=XM46zcqPOXStISfihzsPXPfsW_23E50brmBHk04ncVI,6156
-openhands/agent_server/event_service.py,sha256=
-openhands/agent_server/file_router.py,sha256=
-openhands/agent_server/git_router.py,sha256=
-openhands/agent_server/logging_config.py,sha256=
-openhands/agent_server/middleware.py,sha256=
+openhands/agent_server/event_service.py,sha256=EFjIkWbTrROAv2zFzsIOhcRDTZIhf0uIGQStNDg8nFY,26838
+openhands/agent_server/file_router.py,sha256=MqFmTcDFE42EEPwRncBtT-Vu8_U78OZfC6pm0tnlBZk,4161
+openhands/agent_server/git_router.py,sha256=z-jbkY4me1HuLOWTzJu_osI19ZGGri5ffYujVAWe31s,974
+openhands/agent_server/logging_config.py,sha256=b3N5LVGuwDd0bBsMxrVdCHa8N1Nsreawgi23hTkDrro,3985
+openhands/agent_server/middleware.py,sha256=WQN5T14E58cvG2ZG6qfaJL4Dk9DqUGqD5tyAva1Un_4,1407
 openhands/agent_server/models.py,sha256=R3WSDIQN1burIyV4Dz05YITR9WWcC_pXgZDk-7WG6hw,9723
 openhands/agent_server/openapi.py,sha256=RJWaOnM9NjzrH-fJi3PoIBv5d0sH5z8zZTdvYzSqoFU,482
 openhands/agent_server/pub_sub.py,sha256=yPB84Wub7A2uwUWsW_grbxomWmKaWyGuo6dVNUzg1FU,2755
 openhands/agent_server/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 openhands/agent_server/server_details_router.py,sha256=LCa0NQ3SOw-IgJh3rDJL5GolFOpZcX_-XEbDLHF8dN4,950
+openhands/agent_server/skills_router.py,sha256=mYiIAOGJxnpMz_OsbtP2cBjHNvo3qYUXqGPHKQs_pEw,5601
+openhands/agent_server/skills_service.py,sha256=hmyo1FtUEwfPWk1NnMyGGhvIqPwO28Mjk3chUw_98XM,14125
 openhands/agent_server/sockets.py,sha256=UhZr_QoHsIX7x_tzvk9_AJa2S8KJmRr4syhIU4He8Rs,6617
 openhands/agent_server/tool_preload_service.py,sha256=2QuFyn7jC4Ifq2aUhs9j8876wSBdY0eeoLo4qEVi-yA,2341
 openhands/agent_server/tool_router.py,sha256=vM_9UKUzfChLK9B9Z3DL4VtKNdDw4w635knu9N36y0c,676
@@ -28,12 +30,12 @@ openhands/agent_server/utils.py,sha256=ajivE_kGCJ9qUhF9H3Qu7DUKg7uDvDQk16JcO3Xnt
 openhands/agent_server/vscode_router.py,sha256=tPmXzN6teuqMa1jvKS4Q3aWpk9p9wsp4LleKoDvkYGs,2133
 openhands/agent_server/vscode_service.py,sha256=xS_vwIU5W5KwXTbOTzHPkPRIB4xbjmYlAygmRfBOAF8,7606
 openhands/agent_server/docker/Dockerfile,sha256=rdFlMdI_uITipzR7-pPEGFx2Ld-jYhOBfGKONRk4dbU,10724
-openhands/agent_server/docker/build.py,sha256=
+openhands/agent_server/docker/build.py,sha256=UgHoLkgHZtgWqloG2MQ2RCzc6zTIyttOFL_vUzmw_c0,28189
 openhands/agent_server/docker/wallpaper.svg,sha256=FR2g_b5mzz0x5EvRTKO93ASnWPagAyeS9RI3vRQBAsw,11532
 openhands/agent_server/vscode_extensions/openhands-settings/extension.js,sha256=xoCKZ6YXlzlTWnTC52HuzX0sn9s77Vma-47WgEibO88,858
 openhands/agent_server/vscode_extensions/openhands-settings/package.json,sha256=eCkuBBYEVArEjpp7c_m0H207OCLEygZhBLUEkeFNWOg,289
-openhands_agent_server-1.8.2.dist-info/METADATA,sha256=
-openhands_agent_server-1.8.2.dist-info/WHEEL,sha256=
-openhands_agent_server-1.8.2.dist-info/entry_points.txt,sha256=
-openhands_agent_server-1.8.2.dist-info/top_level.txt,sha256=
-openhands_agent_server-1.8.2.dist-info/RECORD,,
+openhands_agent_server-1.9.1.dist-info/METADATA,sha256=B0VjqaMpQpjLUEpETaqh1uOPDKKE28xDE4G4NOnVkRc,748
+openhands_agent_server-1.9.1.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+openhands_agent_server-1.9.1.dist-info/entry_points.txt,sha256=uLQzPhqDqe85Dy9DvPiBE2CeqkwCryggr1Ty_mq65NA,70
+openhands_agent_server-1.9.1.dist-info/top_level.txt,sha256=jHgVu9I0Blam8BXFgedoGKfglPF8XvW1TsJFIjcgP4E,10
+openhands_agent_server-1.9.1.dist-info/RECORD,,
{openhands_agent_server-1.8.2.dist-info → openhands_agent_server-1.9.1.dist-info}/entry_points.txt
RENAMED
File without changes

{openhands_agent_server-1.8.2.dist-info → openhands_agent_server-1.9.1.dist-info}/top_level.txt
RENAMED
File without changes