openhands-sdk 1.7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openhands/sdk/__init__.py +111 -0
- openhands/sdk/agent/__init__.py +8 -0
- openhands/sdk/agent/agent.py +650 -0
- openhands/sdk/agent/base.py +457 -0
- openhands/sdk/agent/prompts/in_context_learning_example.j2 +169 -0
- openhands/sdk/agent/prompts/in_context_learning_example_suffix.j2 +3 -0
- openhands/sdk/agent/prompts/model_specific/anthropic_claude.j2 +3 -0
- openhands/sdk/agent/prompts/model_specific/google_gemini.j2 +1 -0
- openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5-codex.j2 +2 -0
- openhands/sdk/agent/prompts/model_specific/openai_gpt/gpt-5.j2 +3 -0
- openhands/sdk/agent/prompts/security_policy.j2 +22 -0
- openhands/sdk/agent/prompts/security_risk_assessment.j2 +21 -0
- openhands/sdk/agent/prompts/self_documentation.j2 +15 -0
- openhands/sdk/agent/prompts/system_prompt.j2 +132 -0
- openhands/sdk/agent/prompts/system_prompt_interactive.j2 +14 -0
- openhands/sdk/agent/prompts/system_prompt_long_horizon.j2 +40 -0
- openhands/sdk/agent/prompts/system_prompt_planning.j2 +40 -0
- openhands/sdk/agent/prompts/system_prompt_tech_philosophy.j2 +122 -0
- openhands/sdk/agent/utils.py +228 -0
- openhands/sdk/context/__init__.py +28 -0
- openhands/sdk/context/agent_context.py +264 -0
- openhands/sdk/context/condenser/__init__.py +18 -0
- openhands/sdk/context/condenser/base.py +100 -0
- openhands/sdk/context/condenser/llm_summarizing_condenser.py +248 -0
- openhands/sdk/context/condenser/no_op_condenser.py +14 -0
- openhands/sdk/context/condenser/pipeline_condenser.py +56 -0
- openhands/sdk/context/condenser/prompts/summarizing_prompt.j2 +59 -0
- openhands/sdk/context/condenser/utils.py +149 -0
- openhands/sdk/context/prompts/__init__.py +6 -0
- openhands/sdk/context/prompts/prompt.py +114 -0
- openhands/sdk/context/prompts/templates/ask_agent_template.j2 +11 -0
- openhands/sdk/context/prompts/templates/skill_knowledge_info.j2 +8 -0
- openhands/sdk/context/prompts/templates/system_message_suffix.j2 +32 -0
- openhands/sdk/context/skills/__init__.py +28 -0
- openhands/sdk/context/skills/exceptions.py +11 -0
- openhands/sdk/context/skills/skill.py +720 -0
- openhands/sdk/context/skills/trigger.py +36 -0
- openhands/sdk/context/skills/types.py +48 -0
- openhands/sdk/context/view.py +503 -0
- openhands/sdk/conversation/__init__.py +40 -0
- openhands/sdk/conversation/base.py +281 -0
- openhands/sdk/conversation/conversation.py +152 -0
- openhands/sdk/conversation/conversation_stats.py +85 -0
- openhands/sdk/conversation/event_store.py +157 -0
- openhands/sdk/conversation/events_list_base.py +17 -0
- openhands/sdk/conversation/exceptions.py +50 -0
- openhands/sdk/conversation/fifo_lock.py +133 -0
- openhands/sdk/conversation/impl/__init__.py +5 -0
- openhands/sdk/conversation/impl/local_conversation.py +665 -0
- openhands/sdk/conversation/impl/remote_conversation.py +956 -0
- openhands/sdk/conversation/persistence_const.py +9 -0
- openhands/sdk/conversation/response_utils.py +41 -0
- openhands/sdk/conversation/secret_registry.py +126 -0
- openhands/sdk/conversation/serialization_diff.py +0 -0
- openhands/sdk/conversation/state.py +392 -0
- openhands/sdk/conversation/stuck_detector.py +311 -0
- openhands/sdk/conversation/title_utils.py +191 -0
- openhands/sdk/conversation/types.py +45 -0
- openhands/sdk/conversation/visualizer/__init__.py +12 -0
- openhands/sdk/conversation/visualizer/base.py +67 -0
- openhands/sdk/conversation/visualizer/default.py +373 -0
- openhands/sdk/critic/__init__.py +15 -0
- openhands/sdk/critic/base.py +38 -0
- openhands/sdk/critic/impl/__init__.py +12 -0
- openhands/sdk/critic/impl/agent_finished.py +83 -0
- openhands/sdk/critic/impl/empty_patch.py +49 -0
- openhands/sdk/critic/impl/pass_critic.py +42 -0
- openhands/sdk/event/__init__.py +42 -0
- openhands/sdk/event/base.py +149 -0
- openhands/sdk/event/condenser.py +82 -0
- openhands/sdk/event/conversation_error.py +25 -0
- openhands/sdk/event/conversation_state.py +104 -0
- openhands/sdk/event/llm_completion_log.py +39 -0
- openhands/sdk/event/llm_convertible/__init__.py +20 -0
- openhands/sdk/event/llm_convertible/action.py +139 -0
- openhands/sdk/event/llm_convertible/message.py +142 -0
- openhands/sdk/event/llm_convertible/observation.py +141 -0
- openhands/sdk/event/llm_convertible/system.py +61 -0
- openhands/sdk/event/token.py +16 -0
- openhands/sdk/event/types.py +11 -0
- openhands/sdk/event/user_action.py +21 -0
- openhands/sdk/git/exceptions.py +43 -0
- openhands/sdk/git/git_changes.py +249 -0
- openhands/sdk/git/git_diff.py +129 -0
- openhands/sdk/git/models.py +21 -0
- openhands/sdk/git/utils.py +189 -0
- openhands/sdk/hooks/__init__.py +30 -0
- openhands/sdk/hooks/config.py +180 -0
- openhands/sdk/hooks/conversation_hooks.py +227 -0
- openhands/sdk/hooks/executor.py +155 -0
- openhands/sdk/hooks/manager.py +170 -0
- openhands/sdk/hooks/types.py +40 -0
- openhands/sdk/io/__init__.py +6 -0
- openhands/sdk/io/base.py +48 -0
- openhands/sdk/io/cache.py +85 -0
- openhands/sdk/io/local.py +119 -0
- openhands/sdk/io/memory.py +54 -0
- openhands/sdk/llm/__init__.py +45 -0
- openhands/sdk/llm/exceptions/__init__.py +45 -0
- openhands/sdk/llm/exceptions/classifier.py +50 -0
- openhands/sdk/llm/exceptions/mapping.py +54 -0
- openhands/sdk/llm/exceptions/types.py +101 -0
- openhands/sdk/llm/llm.py +1140 -0
- openhands/sdk/llm/llm_registry.py +122 -0
- openhands/sdk/llm/llm_response.py +59 -0
- openhands/sdk/llm/message.py +656 -0
- openhands/sdk/llm/mixins/fn_call_converter.py +1288 -0
- openhands/sdk/llm/mixins/non_native_fc.py +97 -0
- openhands/sdk/llm/options/__init__.py +1 -0
- openhands/sdk/llm/options/chat_options.py +93 -0
- openhands/sdk/llm/options/common.py +19 -0
- openhands/sdk/llm/options/responses_options.py +67 -0
- openhands/sdk/llm/router/__init__.py +10 -0
- openhands/sdk/llm/router/base.py +117 -0
- openhands/sdk/llm/router/impl/multimodal.py +76 -0
- openhands/sdk/llm/router/impl/random.py +22 -0
- openhands/sdk/llm/streaming.py +9 -0
- openhands/sdk/llm/utils/metrics.py +312 -0
- openhands/sdk/llm/utils/model_features.py +192 -0
- openhands/sdk/llm/utils/model_info.py +90 -0
- openhands/sdk/llm/utils/model_prompt_spec.py +98 -0
- openhands/sdk/llm/utils/retry_mixin.py +128 -0
- openhands/sdk/llm/utils/telemetry.py +362 -0
- openhands/sdk/llm/utils/unverified_models.py +156 -0
- openhands/sdk/llm/utils/verified_models.py +65 -0
- openhands/sdk/logger/__init__.py +22 -0
- openhands/sdk/logger/logger.py +195 -0
- openhands/sdk/logger/rolling.py +113 -0
- openhands/sdk/mcp/__init__.py +24 -0
- openhands/sdk/mcp/client.py +76 -0
- openhands/sdk/mcp/definition.py +106 -0
- openhands/sdk/mcp/exceptions.py +19 -0
- openhands/sdk/mcp/tool.py +270 -0
- openhands/sdk/mcp/utils.py +83 -0
- openhands/sdk/observability/__init__.py +4 -0
- openhands/sdk/observability/laminar.py +166 -0
- openhands/sdk/observability/utils.py +20 -0
- openhands/sdk/py.typed +0 -0
- openhands/sdk/secret/__init__.py +19 -0
- openhands/sdk/secret/secrets.py +92 -0
- openhands/sdk/security/__init__.py +6 -0
- openhands/sdk/security/analyzer.py +111 -0
- openhands/sdk/security/confirmation_policy.py +61 -0
- openhands/sdk/security/llm_analyzer.py +29 -0
- openhands/sdk/security/risk.py +100 -0
- openhands/sdk/tool/__init__.py +34 -0
- openhands/sdk/tool/builtins/__init__.py +34 -0
- openhands/sdk/tool/builtins/finish.py +106 -0
- openhands/sdk/tool/builtins/think.py +117 -0
- openhands/sdk/tool/registry.py +184 -0
- openhands/sdk/tool/schema.py +286 -0
- openhands/sdk/tool/spec.py +39 -0
- openhands/sdk/tool/tool.py +481 -0
- openhands/sdk/utils/__init__.py +22 -0
- openhands/sdk/utils/async_executor.py +115 -0
- openhands/sdk/utils/async_utils.py +39 -0
- openhands/sdk/utils/cipher.py +68 -0
- openhands/sdk/utils/command.py +90 -0
- openhands/sdk/utils/deprecation.py +166 -0
- openhands/sdk/utils/github.py +44 -0
- openhands/sdk/utils/json.py +48 -0
- openhands/sdk/utils/models.py +570 -0
- openhands/sdk/utils/paging.py +63 -0
- openhands/sdk/utils/pydantic_diff.py +85 -0
- openhands/sdk/utils/pydantic_secrets.py +64 -0
- openhands/sdk/utils/truncate.py +117 -0
- openhands/sdk/utils/visualize.py +58 -0
- openhands/sdk/workspace/__init__.py +17 -0
- openhands/sdk/workspace/base.py +158 -0
- openhands/sdk/workspace/local.py +189 -0
- openhands/sdk/workspace/models.py +35 -0
- openhands/sdk/workspace/remote/__init__.py +8 -0
- openhands/sdk/workspace/remote/async_remote_workspace.py +149 -0
- openhands/sdk/workspace/remote/base.py +164 -0
- openhands/sdk/workspace/remote/remote_workspace_mixin.py +323 -0
- openhands/sdk/workspace/workspace.py +49 -0
- openhands_sdk-1.7.3.dist-info/METADATA +17 -0
- openhands_sdk-1.7.3.dist-info/RECORD +180 -0
- openhands_sdk-1.7.3.dist-info/WHEEL +5 -0
- openhands_sdk-1.7.3.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
from openhands.sdk.context.condenser.base import CondenserBase
|
|
2
|
+
from openhands.sdk.context.view import View
|
|
3
|
+
from openhands.sdk.event.condenser import Condensation
|
|
4
|
+
from openhands.sdk.llm import LLM
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class PipelineCondenser(CondenserBase):
    """Chain several condensers, applying each one in sequence.

    Every condenser exposes a `condense` method that accepts a `View` (plus an
    optional `agent_llm`) and yields either a new `View` or a `Condensation`
    event. That makes condensers composable: the output `View` of one stage is
    fed into the next, and the chain short-circuits as soon as any stage
    produces a `Condensation`.

    For example:

        # Use the pipeline condenser to chain multiple other condensers together
        condenser = PipelineCondenser(condensers=[
            CondenserA(...),
            CondenserB(...),
            CondenserC(...),
        ])

        result = condenser.condense(view, agent_llm=agent_llm)

    Doing the same without this class requires an `isinstance` check between
    every pair of stages to implement the monadic chaining by hand.
    """

    condensers: list[CondenserBase]
    """The list of condensers to apply in order."""

    def condense(self, view: View, agent_llm: LLM | None = None) -> View | Condensation:
        current: View | Condensation = view
        for stage in self.condensers:
            current = stage.condense(current, agent_llm=agent_llm)
            if isinstance(current, Condensation):
                # A Condensation ends the pipeline; later stages never run.
                return current
        return current

    def handles_condensation_requests(self) -> bool:
        # The pipeline can handle a request if at least one stage can.
        for stage in self.condensers:
            if stage.handles_condensation_requests():
                return True
        return False
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
You are maintaining a context-aware state summary for an interactive agent.
|
|
2
|
+
You will be given a list of events corresponding to actions taken by the agent, and the most recent previous summary if one exists.
|
|
3
|
+
If the events being summarized contain ANY task-tracking, you MUST include a TASK_TRACKING section to maintain continuity.
|
|
4
|
+
When referencing tasks make sure to preserve exact task IDs and statuses.
|
|
5
|
+
|
|
6
|
+
Track:
|
|
7
|
+
|
|
8
|
+
USER_CONTEXT: (Preserve essential user requirements, goals, and clarifications in concise form)
|
|
9
|
+
|
|
10
|
+
TASK_TRACKING: {Active tasks, their IDs and statuses - PRESERVE TASK IDs}
|
|
11
|
+
|
|
12
|
+
COMPLETED: (Tasks completed so far, with brief results)
|
|
13
|
+
PENDING: (Tasks that still need to be done)
|
|
14
|
+
CURRENT_STATE: (Current variables, data structures, or relevant state)
|
|
15
|
+
|
|
16
|
+
For code-specific tasks, also include:
|
|
17
|
+
CODE_STATE: {File paths, function signatures, data structures}
|
|
18
|
+
TESTS: {Failing cases, error messages, outputs}
|
|
19
|
+
CHANGES: {Code edits, variable updates}
|
|
20
|
+
DEPS: {Dependencies, imports, external calls}
|
|
21
|
+
VERSION_CONTROL_STATUS: {Repository state, current branch, PR status, commit history}
|
|
22
|
+
|
|
23
|
+
PRIORITIZE:
|
|
24
|
+
1. Adapt tracking format to match the actual task type
|
|
25
|
+
2. Capture key user requirements and goals
|
|
26
|
+
3. Distinguish between completed and pending tasks
|
|
27
|
+
4. Keep all sections concise and relevant
|
|
28
|
+
|
|
29
|
+
SKIP: Tracking irrelevant details for the current task type
|
|
30
|
+
|
|
31
|
+
Example formats:
|
|
32
|
+
|
|
33
|
+
For code tasks:
|
|
34
|
+
USER_CONTEXT: Fix FITS card float representation issue
|
|
35
|
+
COMPLETED: Modified mod_float() in card.py, all tests passing
|
|
36
|
+
PENDING: Create PR, update documentation
|
|
37
|
+
CODE_STATE: mod_float() in card.py updated
|
|
38
|
+
TESTS: test_format() passed
|
|
39
|
+
CHANGES: str(val) replaces f"{val:.16G}"
|
|
40
|
+
DEPS: None modified
|
|
41
|
+
VERSION_CONTROL_STATUS: Branch: fix-float-precision, Latest commit: a1b2c3d
|
|
42
|
+
|
|
43
|
+
For other tasks:
|
|
44
|
+
USER_CONTEXT: Write 20 haikus based on coin flip results
|
|
45
|
+
COMPLETED: 15 haikus written for results [T,H,T,H,T,H,T,T,H,T,H,T,H,T,H]
|
|
46
|
+
PENDING: 5 more haikus needed
|
|
47
|
+
CURRENT_STATE: Last flip: Heads, Haiku count: 15/20
|
|
48
|
+
|
|
49
|
+
<PREVIOUS SUMMARY>
|
|
50
|
+
{{ previous_summary }}
|
|
51
|
+
</PREVIOUS SUMMARY>
|
|
52
|
+
|
|
53
|
+
{% for event in events %}
|
|
54
|
+
<EVENT>
|
|
55
|
+
{{ event }}
|
|
56
|
+
</EVENT>
|
|
57
|
+
{% endfor %}
|
|
58
|
+
|
|
59
|
+
Now summarize the events using the rules above.
|
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
from collections.abc import Sequence
|
|
2
|
+
|
|
3
|
+
from openhands.sdk.event.base import LLMConvertibleEvent
|
|
4
|
+
from openhands.sdk.llm import LLM
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def get_total_token_count(
    events: Sequence[LLMConvertibleEvent],
    llm: LLM,
) -> int:
    """Return the total token count of *events* rendered as LLM messages.

    The events are first converted to LLM messages, then counted with the
    provided LLM's token-counting utilities (backed by litellm). Useful for
    estimating how much context-window space a sequence of events consumes.

    Args:
        events: The LLM convertible events to count tokens for.
        llm: The LLM whose tokenizer performs the counting.

    Returns:
        Total token count across all events once converted to messages.
    """
    return llm.get_token_count(LLMConvertibleEvent.events_to_messages(list(events)))
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def get_shortest_prefix_above_token_count(
    events: Sequence[LLMConvertibleEvent],
    llm: LLM,
    token_count: int,
) -> int:
    """Find the length of the shortest prefix whose tokens exceed the target.

    Binary-searches for the smallest prefix of *events* that, once converted
    to messages, counts strictly more than ``token_count`` tokens.

    Args:
        events: The LLM convertible events to search through.
        llm: The LLM whose tokenizer performs the counting.
        token_count: The token threshold to exceed.

    Returns:
        The shortest qualifying prefix length; ``0`` when *events* is empty,
        or ``len(events)`` when even the full sequence stays at or under the
        threshold.
    """
    if not events:
        return 0

    if get_total_token_count(events, llm) <= token_count:
        # Even the complete sequence fits within the threshold.
        return len(events)

    # Prefix token counts are non-decreasing in prefix length, so binary
    # search for the first length whose count exceeds the threshold.
    lo, hi = 1, len(events)
    while lo < hi:
        probe = lo + (hi - lo) // 2
        if get_total_token_count(events[:probe], llm) > token_count:
            # Prefix of this length already exceeds; look for a shorter one.
            hi = probe
        else:
            # Still within the threshold; a longer prefix is required.
            lo = probe + 1
    return lo
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def get_suffix_length_for_token_reduction(
    events: Sequence[LLMConvertibleEvent],
    llm: LLM,
    token_reduction: int,
) -> int:
    """Compute how many trailing events survive a target token reduction.

    Determines the maximum number of events from the end of *events* that can
    be kept while still shedding at least ``token_reduction`` tokens, by
    finding (via ``get_shortest_prefix_above_token_count``) the prefix that
    must be dropped.

    Args:
        events: The LLM convertible events under consideration.
        llm: The LLM whose tokenizer performs the counting.
        token_reduction: The minimum number of tokens that must be removed.

    Returns:
        The number of events from the end that can be retained.
    """
    if not events:
        return 0

    if token_reduction <= 0:
        # No reduction requested: the entire sequence can stay.
        return len(events)

    # Length of the shortest prefix whose removal achieves the reduction.
    dropped = get_shortest_prefix_above_token_count(events, llm, token_reduction)

    # Everything after that prefix is the keepable suffix.
    return len(events) - dropped
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
# prompt_utils.py
|
|
2
|
+
import os
|
|
3
|
+
import re
|
|
4
|
+
import sys
|
|
5
|
+
from functools import lru_cache
|
|
6
|
+
|
|
7
|
+
from jinja2 import (
|
|
8
|
+
BaseLoader,
|
|
9
|
+
Environment,
|
|
10
|
+
FileSystemBytecodeCache,
|
|
11
|
+
Template,
|
|
12
|
+
TemplateNotFound,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class FlexibleFileSystemLoader(BaseLoader):
    """Jinja2 loader that resolves both relative and absolute template names.

    Relative names are looked up inside the configured base directory;
    absolute names are loaded directly from anywhere on the filesystem.
    """

    def __init__(self, searchpath: str):
        self.searchpath = os.path.abspath(searchpath)

    def get_source(self, environment, template):  # noqa: ARG002
        # Absolute template names bypass the search path entirely;
        # relative ones are resolved against it.
        path = (
            template
            if os.path.isabs(template)
            else os.path.join(self.searchpath, template)
        )

        if not os.path.exists(path):
            raise TemplateNotFound(template)

        mtime = os.path.getmtime(path)
        with open(path, encoding="utf-8") as fh:
            source = fh.read()

        def uptodate():
            # The cached template stays valid while the file's mtime is
            # unchanged; a vanished file simply invalidates the cache.
            try:
                return os.path.getmtime(path) == mtime
            except OSError:
                return False

        return source, path, uptodate
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def refine(text: str) -> str:
    """Rewrite tool references in *text* for the current platform.

    On Windows, whole-word mentions of "terminal" become "execute_powershell"
    and bare mentions of "bash" become "powershell" (case-insensitively); on
    every other platform the text is returned unchanged.
    """
    if sys.platform != "win32":
        return text
    text = re.sub(r"\bterminal\b", "execute_powershell", text, flags=re.IGNORECASE)
    return re.sub(
        r"(?<!execute_)(?<!_)\bbash\b", "powershell", text, flags=re.IGNORECASE
    )
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
@lru_cache(maxsize=64)
def _get_env(prompt_dir: str) -> Environment:
    """Build (and memoize) a Jinja2 environment rooted at *prompt_dir*."""
    if not prompt_dir:
        raise ValueError("prompt_dir is required")

    # A filesystem bytecode cache lets other processes skip reparsing
    # templates. It lives under the user's home directory so multi-user
    # machines don't hit permission clashes on a shared location.
    cache_dir = os.path.join(os.path.expanduser("~"), ".openhands", "cache", "jinja")
    os.makedirs(cache_dir, exist_ok=True)

    environment = Environment(
        loader=FlexibleFileSystemLoader(prompt_dir),
        bytecode_cache=FileSystemBytecodeCache(directory=cache_dir),
        autoescape=False,
    )
    # Expose refine as a filter so templates can write {{ text|refine }}.
    environment.filters["refine"] = refine
    return environment
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
@lru_cache(maxsize=256)
def _get_template(prompt_dir: str, template_name: str) -> Template:
    """Load (and memoize) the Jinja2 template ``template_name`` from *prompt_dir*.

    Raises:
        FileNotFoundError: If the template file does not exist.
    """
    env = _get_env(prompt_dir)
    try:
        return env.get_template(template_name)
    except TemplateNotFound as err:
        # Translate only the "missing template" case into FileNotFoundError,
        # keeping the original exception chained for debugging. The previous
        # blanket `except Exception` also masked template *syntax* errors as
        # "file not found"; those now propagate unchanged.
        raise FileNotFoundError(
            f"Prompt file {os.path.join(prompt_dir, template_name)} not found"
        ) from err
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def render_template(prompt_dir: str, template_name: str, **ctx) -> str:
    """Render a Jinja2 template.

    Args:
        prompt_dir: Base directory used to resolve relative template names.
        template_name: Either a filename relative to ``prompt_dir``
            (e.g., "system_prompt.j2") or an absolute path
            (e.g., "/path/to/custom_prompt.j2").
        **ctx: Variables passed through to the template.

    Returns:
        The rendered template string, stripped and platform-refined.

    Raises:
        FileNotFoundError: If the template file cannot be found.
    """
    if os.path.isabs(template_name):
        # An absolute path carries its own directory; check existence up
        # front so the error names the exact file, then split it into the
        # (dir, filename) pair the template cache is keyed on.
        if not os.path.isfile(template_name):
            raise FileNotFoundError(f"Prompt file {template_name} not found")
        template = _get_template(
            os.path.dirname(template_name), os.path.basename(template_name)
        )
    else:
        template = _get_template(prompt_dir, template_name)

    rendered = template.render(**ctx)
    return refine(rendered.strip())
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
{% for agent_info in triggered_agents %}
|
|
2
|
+
<EXTRA_INFO>
|
|
3
|
+
The following information has been included based on a keyword match for "{{ agent_info.trigger }}".
|
|
4
|
+
It may or may not be relevant to the user's request.
|
|
5
|
+
|
|
6
|
+
{{ agent_info.content }}
|
|
7
|
+
</EXTRA_INFO>
|
|
8
|
+
{% endfor %}
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
{% if repo_skills %}
|
|
2
|
+
<REPO_CONTEXT>
|
|
3
|
+
The following information has been included based on several files defined in the user's repository.
|
|
4
|
+
Please follow them while working.
|
|
5
|
+
|
|
6
|
+
{% for agent_info in repo_skills %}
|
|
7
|
+
[BEGIN context from [{{ agent_info.name }}]]
|
|
8
|
+
{{ agent_info.content }}
|
|
9
|
+
[END Context]
|
|
10
|
+
{% endfor %}
|
|
11
|
+
</REPO_CONTEXT>
|
|
12
|
+
{% endif %}
|
|
13
|
+
{% if system_message_suffix %}
|
|
14
|
+
|
|
15
|
+
{{ system_message_suffix }}
|
|
16
|
+
{% endif %}
|
|
17
|
+
{% if secret_infos %}
|
|
18
|
+
<CUSTOM_SECRETS>
|
|
19
|
+
### Credential Access
|
|
20
|
+
* Automatic secret injection: When you reference a registered secret key in your bash command, the secret value will be automatically exported as an environment variable before your command executes.
|
|
21
|
+
* How to use secrets: Simply reference the secret key in your command (e.g., `echo ${GITHUB_TOKEN:0:8}` or `curl -H "Authorization: Bearer $API_KEY" https://api.example.com`). The system will detect the key name in your command text and export it as environment variable before it executes your command.
|
|
22
|
+
* Secret detection: The system performs case-insensitive matching to find secret keys in your command text. If a registered secret key appears anywhere in your command, its value will be made available as an environment variable.
|
|
23
|
+
* Security: Secret values are automatically masked in command output to prevent accidental exposure. You will see `<secret-hidden>` instead of the actual secret value in the output.
|
|
24
|
+
* Refreshing expired secrets: Some secrets (like GITHUB_TOKEN) may be updated periodically or expire over time. If a secret stops working (e.g., authentication failures), try using it again in a new command - the system should automatically use the refreshed value. For example, if GITHUB_TOKEN was used in a git remote URL and later expired, you can update the remote URL with the current token: `git remote set-url origin https://${GITHUB_TOKEN}@github.com/username/repo.git` to pick up the refreshed token value.
|
|
25
|
+
* If it still fails, report it to the user.
|
|
26
|
+
|
|
27
|
+
You have access to the following environment variables
|
|
28
|
+
{% for secret_info in secret_infos %}
|
|
29
|
+
* **${{ secret_info.name }}**{% if secret_info.description %} - {{ secret_info.description }}{% endif %}
|
|
30
|
+
{% endfor %}
|
|
31
|
+
</CUSTOM_SECRETS>
|
|
32
|
+
{% endif %}
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
from openhands.sdk.context.skills.exceptions import SkillValidationError
|
|
2
|
+
from openhands.sdk.context.skills.skill import (
|
|
3
|
+
Skill,
|
|
4
|
+
load_project_skills,
|
|
5
|
+
load_public_skills,
|
|
6
|
+
load_skills_from_dir,
|
|
7
|
+
load_user_skills,
|
|
8
|
+
)
|
|
9
|
+
from openhands.sdk.context.skills.trigger import (
|
|
10
|
+
BaseTrigger,
|
|
11
|
+
KeywordTrigger,
|
|
12
|
+
TaskTrigger,
|
|
13
|
+
)
|
|
14
|
+
from openhands.sdk.context.skills.types import SkillKnowledge
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
# Explicit public API of the skills package; controls ``from ... import *``
# and documents which names are intended for external use.
__all__ = [
    "Skill",
    "BaseTrigger",
    "KeywordTrigger",
    "TaskTrigger",
    "SkillKnowledge",
    "load_skills_from_dir",
    "load_user_skills",
    "load_project_skills",
    "load_public_skills",
    "SkillValidationError",
]
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
class SkillError(Exception):
    """Root of the skill exception hierarchy.

    Catch this to handle any skill-related failure uniformly.
    """
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class SkillValidationError(SkillError):
    """Raised when there's a validation error in skill metadata."""

    # Accepts an optional human-readable message; the default is used when
    # callers raise without supplying details.
    def __init__(self, message: str = "Skill validation failed") -> None:
        super().__init__(message)