openhands 0.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of openhands might be problematic; review the details below.

Files changed (94)
  1. openhands/__init__.py +1 -0
  2. openhands/sdk/__init__.py +45 -0
  3. openhands/sdk/agent/__init__.py +8 -0
  4. openhands/sdk/agent/agent/__init__.py +6 -0
  5. openhands/sdk/agent/agent/agent.py +349 -0
  6. openhands/sdk/agent/base.py +103 -0
  7. openhands/sdk/context/__init__.py +28 -0
  8. openhands/sdk/context/agent_context.py +153 -0
  9. openhands/sdk/context/condenser/__init__.py +5 -0
  10. openhands/sdk/context/condenser/condenser.py +73 -0
  11. openhands/sdk/context/condenser/no_op_condenser.py +13 -0
  12. openhands/sdk/context/manager.py +5 -0
  13. openhands/sdk/context/microagents/__init__.py +26 -0
  14. openhands/sdk/context/microagents/exceptions.py +11 -0
  15. openhands/sdk/context/microagents/microagent.py +345 -0
  16. openhands/sdk/context/microagents/types.py +70 -0
  17. openhands/sdk/context/utils/__init__.py +8 -0
  18. openhands/sdk/context/utils/prompt.py +52 -0
  19. openhands/sdk/context/view.py +116 -0
  20. openhands/sdk/conversation/__init__.py +12 -0
  21. openhands/sdk/conversation/conversation.py +207 -0
  22. openhands/sdk/conversation/state.py +50 -0
  23. openhands/sdk/conversation/types.py +6 -0
  24. openhands/sdk/conversation/visualizer.py +300 -0
  25. openhands/sdk/event/__init__.py +27 -0
  26. openhands/sdk/event/base.py +148 -0
  27. openhands/sdk/event/condenser.py +49 -0
  28. openhands/sdk/event/llm_convertible.py +265 -0
  29. openhands/sdk/event/types.py +5 -0
  30. openhands/sdk/event/user_action.py +12 -0
  31. openhands/sdk/event/utils.py +30 -0
  32. openhands/sdk/llm/__init__.py +19 -0
  33. openhands/sdk/llm/exceptions.py +108 -0
  34. openhands/sdk/llm/llm.py +867 -0
  35. openhands/sdk/llm/llm_registry.py +116 -0
  36. openhands/sdk/llm/message.py +216 -0
  37. openhands/sdk/llm/metadata.py +34 -0
  38. openhands/sdk/llm/utils/fn_call_converter.py +1049 -0
  39. openhands/sdk/llm/utils/metrics.py +311 -0
  40. openhands/sdk/llm/utils/model_features.py +153 -0
  41. openhands/sdk/llm/utils/retry_mixin.py +122 -0
  42. openhands/sdk/llm/utils/telemetry.py +252 -0
  43. openhands/sdk/logger.py +167 -0
  44. openhands/sdk/mcp/__init__.py +20 -0
  45. openhands/sdk/mcp/client.py +113 -0
  46. openhands/sdk/mcp/definition.py +69 -0
  47. openhands/sdk/mcp/tool.py +104 -0
  48. openhands/sdk/mcp/utils.py +59 -0
  49. openhands/sdk/tests/llm/test_llm.py +447 -0
  50. openhands/sdk/tests/llm/test_llm_fncall_converter.py +691 -0
  51. openhands/sdk/tests/llm/test_model_features.py +221 -0
  52. openhands/sdk/tool/__init__.py +30 -0
  53. openhands/sdk/tool/builtins/__init__.py +34 -0
  54. openhands/sdk/tool/builtins/finish.py +57 -0
  55. openhands/sdk/tool/builtins/think.py +60 -0
  56. openhands/sdk/tool/schema.py +236 -0
  57. openhands/sdk/tool/security_prompt.py +5 -0
  58. openhands/sdk/tool/tool.py +142 -0
  59. openhands/sdk/utils/__init__.py +14 -0
  60. openhands/sdk/utils/discriminated_union.py +210 -0
  61. openhands/sdk/utils/json.py +48 -0
  62. openhands/sdk/utils/truncate.py +44 -0
  63. openhands/tools/__init__.py +44 -0
  64. openhands/tools/execute_bash/__init__.py +30 -0
  65. openhands/tools/execute_bash/constants.py +31 -0
  66. openhands/tools/execute_bash/definition.py +166 -0
  67. openhands/tools/execute_bash/impl.py +38 -0
  68. openhands/tools/execute_bash/metadata.py +101 -0
  69. openhands/tools/execute_bash/terminal/__init__.py +22 -0
  70. openhands/tools/execute_bash/terminal/factory.py +113 -0
  71. openhands/tools/execute_bash/terminal/interface.py +189 -0
  72. openhands/tools/execute_bash/terminal/subprocess_terminal.py +412 -0
  73. openhands/tools/execute_bash/terminal/terminal_session.py +492 -0
  74. openhands/tools/execute_bash/terminal/tmux_terminal.py +160 -0
  75. openhands/tools/execute_bash/utils/command.py +150 -0
  76. openhands/tools/str_replace_editor/__init__.py +17 -0
  77. openhands/tools/str_replace_editor/definition.py +158 -0
  78. openhands/tools/str_replace_editor/editor.py +683 -0
  79. openhands/tools/str_replace_editor/exceptions.py +41 -0
  80. openhands/tools/str_replace_editor/impl.py +66 -0
  81. openhands/tools/str_replace_editor/utils/__init__.py +0 -0
  82. openhands/tools/str_replace_editor/utils/config.py +2 -0
  83. openhands/tools/str_replace_editor/utils/constants.py +9 -0
  84. openhands/tools/str_replace_editor/utils/encoding.py +135 -0
  85. openhands/tools/str_replace_editor/utils/file_cache.py +154 -0
  86. openhands/tools/str_replace_editor/utils/history.py +122 -0
  87. openhands/tools/str_replace_editor/utils/shell.py +72 -0
  88. openhands/tools/task_tracker/__init__.py +16 -0
  89. openhands/tools/task_tracker/definition.py +336 -0
  90. openhands/tools/utils/__init__.py +1 -0
  91. openhands-0.0.0.dist-info/METADATA +3 -0
  92. openhands-0.0.0.dist-info/RECORD +94 -0
  93. openhands-0.0.0.dist-info/WHEEL +5 -0
  94. openhands-0.0.0.dist-info/top_level.txt +1 -0
openhands/__init__.py ADDED
@@ -0,0 +1 @@
1
# Declare `openhands` as a namespace package so that separately installed
# distributions (e.g. `openhands.sdk`, `openhands.tools`) can contribute
# subpackages under the same top-level name.
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
@@ -0,0 +1,45 @@
1
+ from importlib.metadata import PackageNotFoundError, version
2
+
3
+ from openhands.sdk.agent import Agent, AgentBase
4
+ from openhands.sdk.context import AgentContext
5
+ from openhands.sdk.conversation import Conversation, ConversationCallbackType
6
+ from openhands.sdk.event import Event, EventBase, LLMConvertibleEvent
7
+ from openhands.sdk.llm import (
8
+ LLM,
9
+ ImageContent,
10
+ LLMRegistry,
11
+ Message,
12
+ RegistryEvent,
13
+ TextContent,
14
+ )
15
+ from openhands.sdk.logger import get_logger
16
+ from openhands.sdk.mcp import MCPClient, MCPTool, create_mcp_tools
17
+ from openhands.sdk.tool import ActionBase, ObservationBase, Tool
18
+
19
+
20
+ __version__ = "1.0.0a0"
21
+
22
+ __all__ = [
23
+ "LLM",
24
+ "LLMRegistry",
25
+ "RegistryEvent",
26
+ "Message",
27
+ "TextContent",
28
+ "ImageContent",
29
+ "Tool",
30
+ "AgentBase",
31
+ "Agent",
32
+ "ActionBase",
33
+ "ObservationBase",
34
+ "MCPClient",
35
+ "MCPTool",
36
+ "create_mcp_tools",
37
+ "get_logger",
38
+ "Conversation",
39
+ "ConversationCallbackType",
40
+ "Event",
41
+ "EventBase",
42
+ "LLMConvertibleEvent",
43
+ "AgentContext",
44
+ "__version__",
45
+ ]
@@ -0,0 +1,8 @@
1
+ from openhands.sdk.agent.agent import Agent
2
+ from openhands.sdk.agent.base import AgentBase
3
+
4
+
5
+ __all__ = [
6
+ "Agent",
7
+ "AgentBase",
8
+ ]
@@ -0,0 +1,6 @@
1
+ from .agent import Agent
2
+
3
+
4
+ __all__ = [
5
+ "Agent",
6
+ ]
@@ -0,0 +1,349 @@
1
+ import json
2
+ from typing import cast
3
+
4
+ from litellm.types.utils import (
5
+ ChatCompletionMessageToolCall,
6
+ Choices,
7
+ Message as LiteLLMMessage,
8
+ )
9
+ from pydantic import ValidationError
10
+
11
+ from openhands.sdk.agent.base import AgentBase
12
+ from openhands.sdk.context import AgentContext, render_template
13
+ from openhands.sdk.context.condenser import Condenser
14
+ from openhands.sdk.context.view import View
15
+ from openhands.sdk.conversation import ConversationCallbackType, ConversationState
16
+ from openhands.sdk.event import (
17
+ ActionEvent,
18
+ AgentErrorEvent,
19
+ LLMConvertibleEvent,
20
+ MessageEvent,
21
+ ObservationEvent,
22
+ SystemPromptEvent,
23
+ )
24
+ from openhands.sdk.event.condenser import Condensation
25
+ from openhands.sdk.event.utils import get_unmatched_actions
26
+ from openhands.sdk.llm import (
27
+ LLM,
28
+ Message,
29
+ MetricsSnapshot,
30
+ TextContent,
31
+ get_llm_metadata,
32
+ )
33
+ from openhands.sdk.logger import get_logger
34
+ from openhands.sdk.tool import (
35
+ BUILT_IN_TOOLS,
36
+ ActionBase,
37
+ FinishTool,
38
+ ObservationBase,
39
+ Tool,
40
+ )
41
+ from openhands.sdk.tool.builtins import FinishAction
42
+
43
+
44
# Module-level logger for this file.
logger = get_logger(__name__)
45
+
46
+
47
+ class Agent(AgentBase):
48
+ def __init__(
49
+ self,
50
+ llm: LLM,
51
+ tools: list[Tool],
52
+ agent_context: AgentContext | None = None,
53
+ system_prompt_filename: str = "system_prompt.j2",
54
+ condenser: Condenser | None = None,
55
+ cli_mode: bool = True,
56
+ ) -> None:
57
+ for tool in BUILT_IN_TOOLS:
58
+ assert tool not in tools, (
59
+ f"{tool} is automatically included and should not be provided."
60
+ )
61
+ super().__init__(
62
+ llm=llm,
63
+ tools=tools + BUILT_IN_TOOLS,
64
+ agent_context=agent_context,
65
+ )
66
+
67
+ self.system_message: str = render_template(
68
+ prompt_dir=self.prompt_dir,
69
+ template_name=system_prompt_filename,
70
+ cli_mode=cli_mode,
71
+ )
72
+ if agent_context:
73
+ _system_message_suffix = agent_context.get_system_message_suffix()
74
+ if _system_message_suffix:
75
+ self.system_message += "\n\n" + _system_message_suffix
76
+
77
+ self.condenser = condenser
78
+
79
+ def init_state(
80
+ self,
81
+ state: ConversationState,
82
+ on_event: ConversationCallbackType,
83
+ ) -> None:
84
+ # TODO(openhands): we should add test to test this init_state will actually
85
+ # modify state in-place
86
+ llm_convertible_messages = [
87
+ event for event in state.events if isinstance(event, LLMConvertibleEvent)
88
+ ]
89
+ if len(llm_convertible_messages) == 0:
90
+ # Prepare system message
91
+ event = SystemPromptEvent(
92
+ source="agent",
93
+ system_prompt=TextContent(text=self.system_message),
94
+ tools=[t.to_openai_tool() for t in self.tools.values()],
95
+ )
96
+ on_event(event)
97
+
98
+ def _execute_actions(
99
+ self,
100
+ state: ConversationState,
101
+ action_events: list[ActionEvent],
102
+ on_event: ConversationCallbackType,
103
+ ):
104
+ for action_event in action_events:
105
+ self._execute_action_events(state, action_event, on_event=on_event)
106
+
107
+ def step(
108
+ self,
109
+ state: ConversationState,
110
+ on_event: ConversationCallbackType,
111
+ ) -> None:
112
+ # Check for pending actions (implicit confirmation)
113
+ # and execute them before sampling new actions.
114
+ pending_actions = get_unmatched_actions(state.events)
115
+ if pending_actions:
116
+ logger.info(
117
+ "Confirmation mode: Executing %d pending action(s)",
118
+ len(pending_actions),
119
+ )
120
+ self._execute_actions(state, pending_actions, on_event)
121
+ return
122
+
123
+ # If a condenser is registered with the agent, we need to give it an
124
+ # opportunity to transform the events. This will either produce a list
125
+ # of events, exactly as expected, or a new condensation that needs to be
126
+ # processed before the agent can sample another action.
127
+ if self.condenser is not None:
128
+ view = View.from_events(state.events)
129
+ condensation_result = self.condenser.condense(view)
130
+
131
+ match condensation_result:
132
+ case View():
133
+ llm_convertible_events = condensation_result.events
134
+
135
+ case Condensation():
136
+ on_event(condensation_result)
137
+ return None
138
+
139
+ else:
140
+ llm_convertible_events = cast(
141
+ list[LLMConvertibleEvent],
142
+ [e for e in state.events if isinstance(e, LLMConvertibleEvent)],
143
+ )
144
+
145
+ # Get LLM Response (Action)
146
+ _messages = LLMConvertibleEvent.events_to_messages(llm_convertible_events)
147
+ logger.debug(
148
+ "Sending messages to LLM: "
149
+ f"{json.dumps([m.model_dump() for m in _messages], indent=2)}"
150
+ )
151
+ tools = [tool.to_openai_tool() for tool in self.tools.values()]
152
+ response = self.llm.completion(
153
+ messages=_messages,
154
+ tools=tools,
155
+ extra_body={
156
+ "metadata": get_llm_metadata(
157
+ model_name=self.llm.model, agent_name=self.name
158
+ )
159
+ },
160
+ )
161
+ assert len(response.choices) == 1 and isinstance(response.choices[0], Choices)
162
+ llm_message: LiteLLMMessage = response.choices[0].message # type: ignore
163
+ message = Message.from_litellm_message(llm_message)
164
+
165
+ assert self.llm.metrics is not None, "LLM metrics should not be None"
166
+ metrics = self.llm.metrics.get_snapshot() # take a snapshot of metrics
167
+
168
+ if message.tool_calls and len(message.tool_calls) > 0:
169
+ tool_call: ChatCompletionMessageToolCall
170
+ if any(tc.type != "function" for tc in message.tool_calls):
171
+ logger.warning(
172
+ "LLM returned tool calls but some are not of type 'function' - "
173
+ "ignoring those"
174
+ )
175
+
176
+ tool_calls = [
177
+ tool_call
178
+ for tool_call in message.tool_calls
179
+ if tool_call.type == "function"
180
+ ]
181
+ assert len(tool_calls) > 0, (
182
+ "LLM returned tool calls but none are of type 'function'"
183
+ )
184
+ if not all(isinstance(c, TextContent) for c in message.content):
185
+ logger.warning(
186
+ "LLM returned tool calls but message content is not all "
187
+ "TextContent - ignoring non-text content"
188
+ )
189
+
190
+ # Generate unique batch ID for this LLM response
191
+ thought_content = [c for c in message.content if isinstance(c, TextContent)]
192
+
193
+ action_events: list[ActionEvent] = []
194
+ for i, tool_call in enumerate(tool_calls):
195
+ action_event = self._get_action_events(
196
+ state,
197
+ tool_call,
198
+ llm_response_id=response.id,
199
+ on_event=on_event,
200
+ thought=thought_content
201
+ if i == 0
202
+ else [], # Only first gets thought
203
+ metrics=metrics if i == len(tool_calls) - 1 else None,
204
+ # Only first gets reasoning content
205
+ reasoning_content=message.reasoning_content if i == 0 else None,
206
+ )
207
+ if action_event is None:
208
+ continue
209
+ action_events.append(action_event)
210
+
211
+ # Handle confirmation mode - exit early if actions need confirmation
212
+ if self._requires_user_confirmation(state, action_events):
213
+ return
214
+
215
+ if action_events:
216
+ self._execute_actions(state, action_events, on_event)
217
+
218
+ else:
219
+ logger.info("LLM produced a message response - awaits user input")
220
+ state.agent_finished = True
221
+ msg_event = MessageEvent(
222
+ source="agent", llm_message=message, metrics=metrics
223
+ )
224
+ on_event(msg_event)
225
+
226
+ def _requires_user_confirmation(
227
+ self, state: ConversationState, action_events: list[ActionEvent]
228
+ ) -> bool:
229
+ """
230
+ Decide whether user confirmation is needed to proceed.
231
+
232
+ Rules:
233
+ 1. Confirmation mode is enabled
234
+ 2. Every action requires confirmation
235
+ 3. A single `FinishAction` never requires confirmation
236
+ """
237
+ if len(action_events) == 0:
238
+ return False
239
+
240
+ if len(action_events) == 1 and isinstance(
241
+ action_events[0].action, FinishAction
242
+ ):
243
+ return False
244
+
245
+ if not state.confirmation_mode:
246
+ return False
247
+
248
+ state.agent_waiting_for_confirmation = True
249
+ return True
250
+
251
+ def _get_action_events(
252
+ self,
253
+ state: ConversationState,
254
+ tool_call: ChatCompletionMessageToolCall,
255
+ llm_response_id: str,
256
+ on_event: ConversationCallbackType,
257
+ thought: list[TextContent] = [],
258
+ metrics: MetricsSnapshot | None = None,
259
+ reasoning_content: str | None = None,
260
+ ) -> ActionEvent | None:
261
+ """Handle tool calls from the LLM.
262
+
263
+ NOTE: state will be mutated in-place.
264
+ """
265
+ assert tool_call.type == "function"
266
+ tool_name = tool_call.function.name
267
+ assert tool_name is not None, "Tool call must have a name"
268
+ tool = self.tools.get(tool_name, None)
269
+ # Handle non-existing tools
270
+ if tool is None:
271
+ err = f"Tool '{tool_name}' not found. Available: {list(self.tools.keys())}"
272
+ logger.error(err)
273
+ event = AgentErrorEvent(
274
+ error=err,
275
+ metrics=metrics,
276
+ )
277
+ on_event(event)
278
+ state.agent_finished = True
279
+ return
280
+
281
+ # Validate arguments
282
+ try:
283
+ action: ActionBase = tool.action_type.model_validate(
284
+ json.loads(tool_call.function.arguments)
285
+ )
286
+ except (json.JSONDecodeError, ValidationError) as e:
287
+ err = (
288
+ f"Error validating args {tool_call.function.arguments} for tool "
289
+ f"'{tool.name}': {e}"
290
+ )
291
+ event = AgentErrorEvent(
292
+ error=err,
293
+ metrics=metrics,
294
+ )
295
+ on_event(event)
296
+ return
297
+
298
+ # Create one ActionEvent per action
299
+ action_event = ActionEvent(
300
+ action=action,
301
+ thought=thought,
302
+ reasoning_content=reasoning_content,
303
+ tool_name=tool.name,
304
+ tool_call_id=tool_call.id,
305
+ tool_call=tool_call,
306
+ llm_response_id=llm_response_id,
307
+ metrics=metrics,
308
+ )
309
+ on_event(action_event)
310
+ return action_event
311
+
312
+ def _execute_action_events(
313
+ self,
314
+ state: ConversationState,
315
+ action_event: ActionEvent,
316
+ on_event: ConversationCallbackType,
317
+ ):
318
+ """Execute action events and update the conversation state.
319
+
320
+ It will call the tool's executor and update the state & call callback fn
321
+ with the observation.
322
+ """
323
+ tool = self.tools.get(action_event.tool_name, None)
324
+ if tool is None:
325
+ raise RuntimeError(
326
+ f"Tool '{action_event.tool_name}' not found. This should not happen "
327
+ "as it was checked earlier."
328
+ )
329
+
330
+ # Execute actions!
331
+ if tool.executor is None:
332
+ raise RuntimeError(f"Tool '{tool.name}' has no executor")
333
+ observation: ObservationBase = tool.executor(action_event.action)
334
+ assert isinstance(observation, ObservationBase), (
335
+ f"Tool '{tool.name}' executor must return an ObservationBase"
336
+ )
337
+
338
+ obs_event = ObservationEvent(
339
+ observation=observation,
340
+ action_id=action_event.id,
341
+ tool_name=tool.name,
342
+ tool_call_id=action_event.tool_call.id,
343
+ )
344
+ on_event(obs_event)
345
+
346
+ # Set conversation state
347
+ if tool.name == FinishTool.name:
348
+ state.agent_finished = True
349
+ return obs_event
@@ -0,0 +1,103 @@
1
+ import os
2
+ import sys
3
+ from abc import ABC, abstractmethod
4
+ from types import MappingProxyType
5
+
6
+ from openhands.sdk.context.agent_context import AgentContext
7
+ from openhands.sdk.conversation import ConversationCallbackType, ConversationState
8
+ from openhands.sdk.llm import LLM
9
+ from openhands.sdk.logger import get_logger
10
+ from openhands.sdk.tool import Tool
11
+
12
+
13
# Module-level logger for this file.
logger = get_logger(__name__)
14
+
15
+
16
class AgentBase(ABC):
    """Abstract base class for agents.

    An agent is stateless: each step depends only on the input
    ConversationState plus the LLM, tools, and agent context supplied at
    construction time.
    """

    def __init__(
        self,
        llm: LLM,
        tools: list[Tool],
        agent_context: AgentContext | None = None,
    ) -> None:
        """Store the LLM, tools, and optional context for later steps.

        Raises:
            ValueError: If two tools share the same name.
        """
        self._llm = llm
        self._agent_context = agent_context

        # Build an immutable name -> Tool mapping, rejecting duplicates.
        registry: dict[str, Tool] = {}
        for t in tools:
            if t.name in registry:
                raise ValueError(f"Duplicate tool name: {t.name}")
            logger.debug(f"Registering tool: {t}")
            registry[t.name] = t
        self._tools = MappingProxyType(registry)

    @property
    def prompt_dir(self) -> str:
        """Path to the `prompts` directory next to this class's module file."""
        mod = sys.modules[self.__class__.__module__]
        src = mod.__file__  # e.g. ".../mypackage/mymodule.py"
        if src is None:
            raise ValueError(f"Module file for {mod} is None")
        return os.path.join(os.path.dirname(src), "prompts")

    @property
    def name(self) -> str:
        """The agent's name (its concrete class name)."""
        return self.__class__.__name__

    @property
    def llm(self) -> LLM:
        """The LLM instance this agent samples from."""
        return self._llm

    @property
    def tools(self) -> MappingProxyType[str, Tool]:
        """Immutable mapping from tool name to Tool."""
        return self._tools

    @property
    def agent_context(self) -> AgentContext | None:
        """The optional AgentContext supplied at construction time."""
        return self._agent_context

    @abstractmethod
    def init_state(
        self,
        state: ConversationState,
        on_event: ConversationCallbackType,
    ) -> None:
        """Prepare an empty conversation state for user messages.

        Typically this emits the system message.

        NOTE: state will be mutated in-place.
        """
        raise NotImplementedError("Subclasses must implement this method.")

    @abstractmethod
    def step(
        self,
        state: ConversationState,
        on_event: ConversationCallbackType,
    ) -> None:
        """Advance the conversation by one step.

        A step usually: calls the LLM, executes any resulting tool calls, and
        records both the assistant output and the tool results in the state.
        When the conversation is done, implementations set the
        `state.agent_finished` flag; otherwise they simply return and the
        Conversation drives the next step.

        NOTE: state will be mutated in-place.
        """
        raise NotImplementedError("Subclasses must implement this method.")
@@ -0,0 +1,28 @@
1
+ from openhands.sdk.context.agent_context import (
2
+ AgentContext,
3
+ )
4
+ from openhands.sdk.context.microagents import (
5
+ BaseMicroagent,
6
+ KnowledgeMicroagent,
7
+ MicroagentKnowledge,
8
+ MicroagentMetadata,
9
+ MicroagentType,
10
+ MicroagentValidationError,
11
+ RepoMicroagent,
12
+ load_microagents_from_dir,
13
+ )
14
+ from openhands.sdk.context.utils import render_template
15
+
16
+
17
+ __all__ = [
18
+ "AgentContext",
19
+ "BaseMicroagent",
20
+ "KnowledgeMicroagent",
21
+ "RepoMicroagent",
22
+ "MicroagentMetadata",
23
+ "MicroagentType",
24
+ "MicroagentKnowledge",
25
+ "load_microagents_from_dir",
26
+ "render_template",
27
+ "MicroagentValidationError",
28
+ ]
@@ -0,0 +1,153 @@
1
+ import pathlib
2
+
3
+ from pydantic import BaseModel, Field, field_validator
4
+
5
+ from openhands.sdk.context.microagents import (
6
+ BaseMicroagent,
7
+ KnowledgeMicroagent,
8
+ MicroagentKnowledge,
9
+ RepoMicroagent,
10
+ )
11
+ from openhands.sdk.context.utils import render_template
12
+ from openhands.sdk.llm import Message, TextContent
13
+ from openhands.sdk.logger import get_logger
14
+
15
+
16
# Module-level logger for this file.
logger = get_logger(__name__)

# Directory holding the Jinja2 templates used by AgentContext rendering.
PROMPT_DIR = pathlib.Path(__file__).parent / "utils" / "prompts"
19
+
20
+
21
class AgentContext(BaseModel):
    """Central structure for managing prompt extension.

    AgentContext gathers all contextual inputs that shape how the system
    extends user prompts, combining static environment details with dynamic,
    trigger-activated extensions from microagents:

    - Repo microagents contribute repository-specific instructions that are
      appended to the system prompt.
    - Knowledge microagents are matched against each user message and, when
      triggered, contribute extra knowledge appended to that message.
    - Free-form suffixes can optionally be appended to the system prompt and
      to every user message (runtime details, conversation instructions).

    It is the single container responsible for assembling, formatting, and
    injecting all prompt-relevant context into LLM interactions.
    """

    microagents: list[BaseMicroagent] = Field(
        default_factory=list,
        description="List of available microagents that can extend the user's input.",
    )
    system_message_suffix: str | None = Field(
        default=None, description="Optional suffix to append to the system prompt."
    )
    user_message_suffix: str | None = Field(
        default=None, description="Optional suffix to append to the user's message."
    )

    @field_validator("microagents")
    @classmethod
    def _validate_microagents(cls, v: list[BaseMicroagent], info):
        """Reject microagent lists that contain two entries with one name."""
        if not v:
            return v
        names_seen: set[str] = set()
        for agent in v:
            if agent.name in names_seen:
                raise ValueError(f"Duplicate microagent name found: {agent.name}")
            names_seen.add(agent.name)
        return v

    def get_system_message_suffix(self) -> str | None:
        """Build the system-prompt suffix, if any.

        Combines repository instructions collected from repo microagents with
        the configured ``system_message_suffix`` (repository info, runtime
        info, conversation instructions, etc.). Returns None when there is
        nothing to append.
        """
        repo_agents = [m for m in self.microagents if isinstance(m, RepoMicroagent)]
        logger.debug(
            f"Triggered {len(repo_agents)} repository "
            f"microagents: {repo_agents}"
        )
        if repo_agents:
            # TODO(test): add a test for this rendering to make sure they work
            rendered = render_template(
                prompt_dir=str(PROMPT_DIR),
                template_name="system_message_suffix.j2",
                repo_microagents=repo_agents,
                system_message_suffix=self.system_message_suffix or "",
            )
            return rendered.strip()
        # No repo microagents: fall back to the bare suffix (if non-blank).
        bare = (self.system_message_suffix or "").strip()
        return bare or None

    def get_user_message_suffix(
        self, user_message: Message, skip_microagent_names: list[str]
    ) -> tuple[TextContent, list[str]] | None:
        """Augment the user's message with microagent-recalled knowledge.

        Matches knowledge-microagent triggers against the text content of
        ``user_message`` (skipping any microagent named in
        ``skip_microagent_names``) and, when any fire, returns the rendered
        knowledge plus the list of triggered microagent names. The configured
        ``user_message_suffix`` is appended when present. Returns None when
        there is nothing to add.
        """
        suffix_text: str | None = None
        if self.user_message_suffix and self.user_message_suffix.strip():
            suffix_text = self.user_message_suffix.strip()

        text_parts = (
            c.text for c in user_message.content if isinstance(c, TextContent)
        )
        query = "\n".join(text_parts).strip()

        # Empty query: nothing to match, but a configured suffix still applies.
        if not query:
            return (TextContent(text=suffix_text), []) if suffix_text else None

        # Search for microagent triggers in the query.
        triggered: list[MicroagentKnowledge] = []
        for agent in self.microagents:
            if not isinstance(agent, KnowledgeMicroagent):
                continue
            trigger = agent.match_trigger(query)
            if not trigger or agent.name in skip_microagent_names:
                continue
            logger.info(
                "Microagent '%s' triggered by keyword '%s'",
                agent.name,
                trigger,
            )
            triggered.append(
                MicroagentKnowledge(
                    name=agent.name,
                    trigger=trigger,
                    content=agent.content,
                )
            )

        if triggered:
            body = render_template(
                prompt_dir=str(PROMPT_DIR),
                template_name="microagent_knowledge_info.j2",
                triggered_agents=triggered,
            )
            if suffix_text:
                body += "\n" + suffix_text
            return TextContent(text=body), [k.name for k in triggered]

        if suffix_text:
            return TextContent(text=suffix_text), []
        return None