kweaver-dolphin 0.1.0 (kweaver_dolphin-0.1.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- DolphinLanguageSDK/__init__.py +58 -0
- dolphin/__init__.py +62 -0
- dolphin/cli/__init__.py +20 -0
- dolphin/cli/args/__init__.py +9 -0
- dolphin/cli/args/parser.py +567 -0
- dolphin/cli/builtin_agents/__init__.py +22 -0
- dolphin/cli/commands/__init__.py +4 -0
- dolphin/cli/interrupt/__init__.py +8 -0
- dolphin/cli/interrupt/handler.py +205 -0
- dolphin/cli/interrupt/keyboard.py +82 -0
- dolphin/cli/main.py +49 -0
- dolphin/cli/multimodal/__init__.py +34 -0
- dolphin/cli/multimodal/clipboard.py +327 -0
- dolphin/cli/multimodal/handler.py +249 -0
- dolphin/cli/multimodal/image_processor.py +214 -0
- dolphin/cli/multimodal/input_parser.py +149 -0
- dolphin/cli/runner/__init__.py +8 -0
- dolphin/cli/runner/runner.py +989 -0
- dolphin/cli/ui/__init__.py +10 -0
- dolphin/cli/ui/console.py +2795 -0
- dolphin/cli/ui/input.py +340 -0
- dolphin/cli/ui/layout.py +425 -0
- dolphin/cli/ui/stream_renderer.py +302 -0
- dolphin/cli/utils/__init__.py +8 -0
- dolphin/cli/utils/helpers.py +135 -0
- dolphin/cli/utils/version.py +49 -0
- dolphin/core/__init__.py +107 -0
- dolphin/core/agent/__init__.py +10 -0
- dolphin/core/agent/agent_state.py +69 -0
- dolphin/core/agent/base_agent.py +970 -0
- dolphin/core/code_block/__init__.py +0 -0
- dolphin/core/code_block/agent_init_block.py +0 -0
- dolphin/core/code_block/assign_block.py +98 -0
- dolphin/core/code_block/basic_code_block.py +1865 -0
- dolphin/core/code_block/explore_block.py +1327 -0
- dolphin/core/code_block/explore_block_v2.py +712 -0
- dolphin/core/code_block/explore_strategy.py +672 -0
- dolphin/core/code_block/judge_block.py +220 -0
- dolphin/core/code_block/prompt_block.py +32 -0
- dolphin/core/code_block/skill_call_deduplicator.py +291 -0
- dolphin/core/code_block/tool_block.py +129 -0
- dolphin/core/common/__init__.py +17 -0
- dolphin/core/common/constants.py +176 -0
- dolphin/core/common/enums.py +1173 -0
- dolphin/core/common/exceptions.py +133 -0
- dolphin/core/common/multimodal.py +539 -0
- dolphin/core/common/object_type.py +165 -0
- dolphin/core/common/output_format.py +432 -0
- dolphin/core/common/types.py +36 -0
- dolphin/core/config/__init__.py +16 -0
- dolphin/core/config/global_config.py +1289 -0
- dolphin/core/config/ontology_config.py +133 -0
- dolphin/core/context/__init__.py +12 -0
- dolphin/core/context/context.py +1580 -0
- dolphin/core/context/context_manager.py +161 -0
- dolphin/core/context/var_output.py +82 -0
- dolphin/core/context/variable_pool.py +356 -0
- dolphin/core/context_engineer/__init__.py +41 -0
- dolphin/core/context_engineer/config/__init__.py +5 -0
- dolphin/core/context_engineer/config/settings.py +402 -0
- dolphin/core/context_engineer/core/__init__.py +7 -0
- dolphin/core/context_engineer/core/budget_manager.py +327 -0
- dolphin/core/context_engineer/core/context_assembler.py +583 -0
- dolphin/core/context_engineer/core/context_manager.py +637 -0
- dolphin/core/context_engineer/core/tokenizer_service.py +260 -0
- dolphin/core/context_engineer/example/incremental_example.py +267 -0
- dolphin/core/context_engineer/example/traditional_example.py +334 -0
- dolphin/core/context_engineer/services/__init__.py +5 -0
- dolphin/core/context_engineer/services/compressor.py +399 -0
- dolphin/core/context_engineer/utils/__init__.py +6 -0
- dolphin/core/context_engineer/utils/context_utils.py +441 -0
- dolphin/core/context_engineer/utils/message_formatter.py +270 -0
- dolphin/core/context_engineer/utils/token_utils.py +139 -0
- dolphin/core/coroutine/__init__.py +15 -0
- dolphin/core/coroutine/context_snapshot.py +154 -0
- dolphin/core/coroutine/context_snapshot_profile.py +922 -0
- dolphin/core/coroutine/context_snapshot_store.py +268 -0
- dolphin/core/coroutine/execution_frame.py +145 -0
- dolphin/core/coroutine/execution_state_registry.py +161 -0
- dolphin/core/coroutine/resume_handle.py +101 -0
- dolphin/core/coroutine/step_result.py +101 -0
- dolphin/core/executor/__init__.py +18 -0
- dolphin/core/executor/debug_controller.py +630 -0
- dolphin/core/executor/dolphin_executor.py +1063 -0
- dolphin/core/executor/executor.py +624 -0
- dolphin/core/flags/__init__.py +27 -0
- dolphin/core/flags/definitions.py +49 -0
- dolphin/core/flags/manager.py +113 -0
- dolphin/core/hook/__init__.py +95 -0
- dolphin/core/hook/expression_evaluator.py +499 -0
- dolphin/core/hook/hook_dispatcher.py +380 -0
- dolphin/core/hook/hook_types.py +248 -0
- dolphin/core/hook/isolated_variable_pool.py +284 -0
- dolphin/core/interfaces.py +53 -0
- dolphin/core/llm/__init__.py +0 -0
- dolphin/core/llm/llm.py +495 -0
- dolphin/core/llm/llm_call.py +100 -0
- dolphin/core/llm/llm_client.py +1285 -0
- dolphin/core/llm/message_sanitizer.py +120 -0
- dolphin/core/logging/__init__.py +20 -0
- dolphin/core/logging/logger.py +526 -0
- dolphin/core/message/__init__.py +8 -0
- dolphin/core/message/compressor.py +749 -0
- dolphin/core/parser/__init__.py +8 -0
- dolphin/core/parser/parser.py +405 -0
- dolphin/core/runtime/__init__.py +10 -0
- dolphin/core/runtime/runtime_graph.py +926 -0
- dolphin/core/runtime/runtime_instance.py +446 -0
- dolphin/core/skill/__init__.py +14 -0
- dolphin/core/skill/context_retention.py +157 -0
- dolphin/core/skill/skill_function.py +686 -0
- dolphin/core/skill/skill_matcher.py +282 -0
- dolphin/core/skill/skillkit.py +700 -0
- dolphin/core/skill/skillset.py +72 -0
- dolphin/core/trajectory/__init__.py +10 -0
- dolphin/core/trajectory/recorder.py +189 -0
- dolphin/core/trajectory/trajectory.py +522 -0
- dolphin/core/utils/__init__.py +9 -0
- dolphin/core/utils/cache_kv.py +212 -0
- dolphin/core/utils/tools.py +340 -0
- dolphin/lib/__init__.py +93 -0
- dolphin/lib/debug/__init__.py +8 -0
- dolphin/lib/debug/visualizer.py +409 -0
- dolphin/lib/memory/__init__.py +28 -0
- dolphin/lib/memory/async_processor.py +220 -0
- dolphin/lib/memory/llm_calls.py +195 -0
- dolphin/lib/memory/manager.py +78 -0
- dolphin/lib/memory/sandbox.py +46 -0
- dolphin/lib/memory/storage.py +245 -0
- dolphin/lib/memory/utils.py +51 -0
- dolphin/lib/ontology/__init__.py +12 -0
- dolphin/lib/ontology/basic/__init__.py +0 -0
- dolphin/lib/ontology/basic/base.py +102 -0
- dolphin/lib/ontology/basic/concept.py +130 -0
- dolphin/lib/ontology/basic/object.py +11 -0
- dolphin/lib/ontology/basic/relation.py +63 -0
- dolphin/lib/ontology/datasource/__init__.py +27 -0
- dolphin/lib/ontology/datasource/datasource.py +66 -0
- dolphin/lib/ontology/datasource/oracle_datasource.py +338 -0
- dolphin/lib/ontology/datasource/sql.py +845 -0
- dolphin/lib/ontology/mapping.py +177 -0
- dolphin/lib/ontology/ontology.py +733 -0
- dolphin/lib/ontology/ontology_context.py +16 -0
- dolphin/lib/ontology/ontology_manager.py +107 -0
- dolphin/lib/skill_results/__init__.py +31 -0
- dolphin/lib/skill_results/cache_backend.py +559 -0
- dolphin/lib/skill_results/result_processor.py +181 -0
- dolphin/lib/skill_results/result_reference.py +179 -0
- dolphin/lib/skill_results/skillkit_hook.py +324 -0
- dolphin/lib/skill_results/strategies.py +328 -0
- dolphin/lib/skill_results/strategy_registry.py +150 -0
- dolphin/lib/skillkits/__init__.py +44 -0
- dolphin/lib/skillkits/agent_skillkit.py +155 -0
- dolphin/lib/skillkits/cognitive_skillkit.py +82 -0
- dolphin/lib/skillkits/env_skillkit.py +250 -0
- dolphin/lib/skillkits/mcp_adapter.py +616 -0
- dolphin/lib/skillkits/mcp_skillkit.py +771 -0
- dolphin/lib/skillkits/memory_skillkit.py +650 -0
- dolphin/lib/skillkits/noop_skillkit.py +31 -0
- dolphin/lib/skillkits/ontology_skillkit.py +89 -0
- dolphin/lib/skillkits/plan_act_skillkit.py +452 -0
- dolphin/lib/skillkits/resource/__init__.py +52 -0
- dolphin/lib/skillkits/resource/models/__init__.py +6 -0
- dolphin/lib/skillkits/resource/models/skill_config.py +109 -0
- dolphin/lib/skillkits/resource/models/skill_meta.py +127 -0
- dolphin/lib/skillkits/resource/resource_skillkit.py +393 -0
- dolphin/lib/skillkits/resource/skill_cache.py +215 -0
- dolphin/lib/skillkits/resource/skill_loader.py +395 -0
- dolphin/lib/skillkits/resource/skill_validator.py +406 -0
- dolphin/lib/skillkits/resource_skillkit.py +11 -0
- dolphin/lib/skillkits/search_skillkit.py +163 -0
- dolphin/lib/skillkits/sql_skillkit.py +274 -0
- dolphin/lib/skillkits/system_skillkit.py +509 -0
- dolphin/lib/skillkits/vm_skillkit.py +65 -0
- dolphin/lib/utils/__init__.py +9 -0
- dolphin/lib/utils/data_process.py +207 -0
- dolphin/lib/utils/handle_progress.py +178 -0
- dolphin/lib/utils/security.py +139 -0
- dolphin/lib/utils/text_retrieval.py +462 -0
- dolphin/lib/vm/__init__.py +11 -0
- dolphin/lib/vm/env_executor.py +895 -0
- dolphin/lib/vm/python_session_manager.py +453 -0
- dolphin/lib/vm/vm.py +610 -0
- dolphin/sdk/__init__.py +60 -0
- dolphin/sdk/agent/__init__.py +12 -0
- dolphin/sdk/agent/agent_factory.py +236 -0
- dolphin/sdk/agent/dolphin_agent.py +1106 -0
- dolphin/sdk/api/__init__.py +4 -0
- dolphin/sdk/runtime/__init__.py +8 -0
- dolphin/sdk/runtime/env.py +363 -0
- dolphin/sdk/skill/__init__.py +10 -0
- dolphin/sdk/skill/global_skills.py +706 -0
- dolphin/sdk/skill/traditional_toolkit.py +260 -0
- kweaver_dolphin-0.1.0.dist-info/METADATA +521 -0
- kweaver_dolphin-0.1.0.dist-info/RECORD +199 -0
- kweaver_dolphin-0.1.0.dist-info/WHEEL +5 -0
- kweaver_dolphin-0.1.0.dist-info/entry_points.txt +27 -0
- kweaver_dolphin-0.1.0.dist-info/licenses/LICENSE.txt +201 -0
- kweaver_dolphin-0.1.0.dist-info/top_level.txt +2 -0
dolphin/core/code_block/explore_strategy.py
@@ -0,0 +1,672 @@
"""Explore Mode Strategy Interface and Implementation

This module defines the ExploreStrategy abstract interface and provides two concrete implementations:
- PromptStrategy: Prompt mode, invoking tools in the prompt using the =># format
- ToolCallStrategy: Tool Call mode, utilizing the LLM's native tool_call capability

Design documents:
- Strategy design: docs/design/architecture/explore_block_merge.md
- Multiple tool calls: docs/design/core/multiple-tool-calls.md
"""

import json
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Optional, Dict, Any, List

from dolphin.core.common.enums import StreamItem, Messages, MessageRole
from dolphin.core.common.constants import TOOL_CALL_ID_PREFIX
from dolphin.core.context.context import Context
from dolphin.core.context_engineer.config.settings import BuildInBucket
from dolphin.core.skill.skillkit import Skillkit
from dolphin.core.code_block.skill_call_deduplicator import (
    SkillCallDeduplicator,
    DefaultSkillCallDeduplicator,
    NoOpSkillCallDeduplicator,
)

@dataclass
class ToolCall:
    """Unified data structure for tool calls

    Used to pass tool call information between different strategies.
    """

    id: str  # Unique identifier of the tool call
    name: str  # Tool/skill name
    arguments: Dict[str, Any]  # Tool call parameters
    raw_text: Optional[str] = None  # Original text (used only in Prompt mode)


class ExploreStrategy(ABC):
    """Exploration mode strategy base class

    Defines the core methods each exploration mode must implement and
    provides shared generic implementations.
    """

    def __init__(self):
        self._deduplicator = DefaultSkillCallDeduplicator()
        self._noop_deduplicator = NoOpSkillCallDeduplicator()
        self._deduplicator_enabled: bool = True

    # ============ Abstract Methods (must be implemented by subclasses) ============

    @abstractmethod
    def make_system_message(
        self,
        skillkit: Skillkit,
        system_prompt: str,
        tools_format: str = "medium"
    ) -> str:
        """Build a system message containing tool descriptions"""
        pass

    @abstractmethod
    def get_llm_params(
        self,
        messages: Messages,
        model: str,
        skillkit: Skillkit,
        tool_choice: Optional[str] = None,
        no_cache: bool = False,
    ) -> Dict[str, Any]:
        """Build LLM call parameters"""
        pass

    @abstractmethod
    def detect_tool_call(
        self,
        stream_item: StreamItem,
        context: Context
    ) -> Optional[ToolCall]:
        """Detect a tool call in a streaming response"""
        pass

    @abstractmethod
    def has_valid_tool_call(
        self,
        stream_item: StreamItem,
        context: Context
    ) -> bool:
        """Check whether a streaming response contains a valid tool call"""
        pass

    @abstractmethod
    def get_tool_call_content(
        self,
        stream_item: StreamItem,
        tool_call: ToolCall
    ) -> str:
        """Get the content part of the tool call message

        Prompt mode: return the content before the tool call marker
        Tool Call mode: return the complete answer
        """
        pass

    # ============ Generic Implementation (Reusable by Subclasses) ============

    def append_tool_call_message(
        self,
        context: Context,
        stream_item: StreamItem,
        tool_call: ToolCall,
    ):
        """Append a tool call message to the context"""
        tool_call_openai_format = [
            {
                "id": tool_call.id,
                "type": "function",
                "function": {
                    "name": tool_call.name,
                    "arguments": (
                        json.dumps(tool_call.arguments, ensure_ascii=False)
                        if tool_call.arguments
                        else "{}"
                    ),
                },
            }
        ]

        content = self.get_tool_call_content(stream_item, tool_call)

        scratchpad_messages = Messages()
        scratchpad_messages.add_tool_call_message(
            content=content, tool_calls=tool_call_openai_format
        )
        context.add_bucket(
            BuildInBucket.SCRATCHPAD.value,
            scratchpad_messages,
        )

    def append_tool_response_message(
        self,
        context: Context,
        tool_call_id: str,
        response: str,
        metadata: Optional[Dict[str, Any]] = None
    ):
        """Add a tool response message to the context"""
        scratchpad_messages = Messages()
        scratchpad_messages.add_tool_response_message(
            content=response,
            tool_call_id=tool_call_id,
            metadata=metadata
        )
        context.add_bucket(
            BuildInBucket.SCRATCHPAD.value,
            scratchpad_messages,
        )

    # ============ Multiple Tool Calls Support ============

    def detect_tool_calls(
        self,
        stream_item: StreamItem,
        context: Context
    ) -> List[ToolCall]:
        """Detect multiple tool calls in a streaming response (new method).

        The default implementation wraps detect_tool_call() and returns a
        single-item list. Subclasses that support native multi-tool-call
        (e.g., ToolCallStrategy) should override this method to extract all
        tool calls.

        Note: PromptStrategy uses this default implementation, since the =>#
        text format it relies on does not support native parallel tool calls.

        Args:
            stream_item: The streaming response item from the LLM
            context: The execution context

        Returns:
            List of ToolCall objects. Empty list if no tool call is detected.
        """
        single = self.detect_tool_call(stream_item, context)
        return [single] if single else []

    def append_tool_calls_message(
        self,
        context: Context,
        stream_item: StreamItem,
        tool_calls: List[ToolCall],
    ):
        """Append a message carrying multiple tool calls to the context.

        Creates a single assistant message containing all tool calls in
        OpenAI format. This is required for proper multi-tool-call support,
        where all tool calls must appear in one message.

        Args:
            context: The execution context
            stream_item: The streaming response item (for extracting content)
            tool_calls: List of ToolCall objects to include in the message
        """
        tool_calls_openai_format = [
            {
                "id": tc.id,
                "type": "function",
                "function": {
                    "name": tc.name,
                    "arguments": (
                        json.dumps(tc.arguments, ensure_ascii=False)
                        if tc.arguments
                        else "{}"
                    ),
                },
            }
            for tc in tool_calls
        ]

        content = stream_item.answer or ""
        scratchpad_messages = Messages()
        scratchpad_messages.add_tool_call_message(
            content=content, tool_calls=tool_calls_openai_format
        )
        context.add_bucket(
            BuildInBucket.SCRATCHPAD.value,
            scratchpad_messages,
        )

    def set_deduplicator_enabled(self, enabled: bool):
        """Enable or disable the skill call deduplicator

        Args:
            enabled: Whether to enable the deduplicator; a no-op
                implementation is used when False
        """
        self._deduplicator_enabled = bool(enabled)

    def get_deduplicator(self) -> SkillCallDeduplicator:
        """Get the duplicate call detector"""
        if self._deduplicator_enabled:
            return self._deduplicator
        return self._noop_deduplicator

    def reset_deduplicator(self):
        """Reset the deduplicator state for retry scenarios."""
        self._deduplicator.clear()

    def get_tool_call_history(self) -> list:
        """Get the history of tool calls from the deduplicator.

        Returns:
            List of tool call dictionaries
        """
        return self._deduplicator.get_history()

class PromptStrategy(ExploreStrategy):
    """Prompt mode strategy implementation

    Tool calling method: call tools in the prompt using the
    =>#tool_name: {json} format
    """

    TOKEN_TOOL_CALL = "=>#"

    def __init__(self):
        super().__init__()

    def append_tool_call_message(
        self,
        context: Context,
        stream_item: StreamItem,
        tool_call: ToolCall,
    ):
        """Add a tool call message to the context (plain text in Prompt mode)

        In Prompt mode the LLM invokes tools via the =># format, not OpenAI's
        native tool_call, so the message stays plain text and carries no
        tool_calls array.
        """
        # Use the full =># format text as the assistant message content
        content = stream_item.answer or ""

        scratchpad_messages = Messages()
        scratchpad_messages.add_message(content, role=MessageRole.ASSISTANT)
        context.add_bucket(
            BuildInBucket.SCRATCHPAD.value,
            scratchpad_messages,
        )

    def append_tool_response_message(
        self,
        context: Context,
        tool_call_id: str,
        response: str,
        metadata: Optional[Dict[str, Any]] = None
    ):
        """Add a tool response message to the context (as a user message in Prompt mode)

        In Prompt mode the LLM does not understand OpenAI's tool-role message
        format. Tool responses are therefore added as user messages so the
        LLM can understand them and continue the conversation.
        """
        # Format the tool response as a user message
        formatted_response = f"[Tool result]: {response}"

        scratchpad_messages = Messages()
        scratchpad_messages.add_message(formatted_response, role=MessageRole.USER, metadata=metadata)
        context.add_bucket(
            BuildInBucket.SCRATCHPAD.value,
            scratchpad_messages,
        )

    def make_system_message(
        self,
        skillkit: Skillkit,
        system_prompt: str,
        tools_format: str = "medium"
    ) -> str:
        """Build the system message for Prompt mode.

        Includes:
        - Goals and tool schemas
        - Metadata prompt from skillkits (e.g., ResourceSkillkit Level 1)
        - User-provided system prompt
        """
        role_format = """
## Goals:
- Analyze the user's question and decide whether to answer it yourself or handle it with a tool. The tools listed under tools are all the tools available to you.

## tools:
{tools}

### tools use Constraints:
- You must understand the question clearly and use the tools proficiently; prefer answering with tools.
- When a tool call is needed, use the format "=>#tool_name: {{key:value}}", where the parameters are strict JSON, e.g. "=>#someskill: {"key1": "value1", "key2": "value2"}".

{metadata_prompt}
{system_prompt}
"""
        if skillkit is not None and not skillkit.isEmpty():
            # Use getFormattedToolsDescription instead of getSchemas for better readability
            role = role_format.replace(r"{tools}", skillkit.getFormattedToolsDescription(tools_format))
        else:
            role_format = """{metadata_prompt}
{system_prompt}"""
            role = role_format

        # Inject metadata prompt from skillkits via skill.owner_skillkit
        metadata_prompt = Skillkit.collect_metadata_from_skills(skillkit)
        role = role.replace(r"{metadata_prompt}", metadata_prompt)

        # Replace user system prompt
        if len(system_prompt.strip()) == 0:
            role = role.replace(r"{system_prompt}", "")
        else:
            role = role.replace(
                r"{system_prompt}", "## User Demands:\n" + system_prompt.strip()
            )
        return role

    def get_llm_params(
        self,
        messages: Messages,
        model: str,
        skillkit: Skillkit,
        tool_choice: Optional[str] = None,
        no_cache: bool = False,
    ) -> Dict[str, Any]:
        """Prompt mode does not pass the tools parameter"""
        return {
            "messages": messages,
            "model": model,
            "no_cache": no_cache,
        }

    def detect_tool_call(
        self,
        stream_item: StreamItem,
        context: Context
    ) -> Optional[ToolCall]:
        """Detect a tool call in a Prompt mode response"""
        answer = stream_item.answer
        if not answer or self.TOKEN_TOOL_CALL not in answer:
            return None

        skill_name = self._first_likely_skill(answer)
        if not skill_name:
            return None

        skillkit = context.get_skillkit()
        if skillkit is None or skill_name not in skillkit.getSkillNames():
            return None

        skill_call = self._complete_skill_call(answer)
        if skill_call is None:
            return None

        skill_name, arguments = skill_call
        tool_call_id = f"{TOOL_CALL_ID_PREFIX}{skill_name}_{id(stream_item) % 10000}"

        return ToolCall(
            id=tool_call_id,
            name=skill_name,
            arguments=arguments,
            raw_text=self._first_likely_skill_call(answer)
        )

    def has_valid_tool_call(
        self,
        stream_item: StreamItem,
        context: Context
    ) -> bool:
        """Check whether a Prompt mode response contains a valid tool call"""
        answer = stream_item.answer
        if not answer or self.TOKEN_TOOL_CALL not in answer:
            return False

        skill_name = self._first_likely_skill(answer)
        if not skill_name:
            return False

        skillkit = context.get_skillkit()
        return skillkit is not None and skill_name in skillkit.getSkillNames()

    def get_tool_call_content(
        self,
        stream_item: StreamItem,
        tool_call: ToolCall
    ) -> str:
        """Get the content that precedes the tool invocation"""
        answer = stream_item.answer or ""
        if self.TOKEN_TOOL_CALL in answer:
            return answer.split(self.TOKEN_TOOL_CALL)[0]
        return answer

    # ============ Helper Methods ============

    def _first_likely_skill(self, buffer: str) -> Optional[str]:
        """Extract the first likely skill name from the buffer"""
        if self.TOKEN_TOOL_CALL not in buffer:
            return None
        return buffer.split(self.TOKEN_TOOL_CALL)[-1].split(":")[0].strip()

    def _first_likely_skill_call(self, buffer: str) -> str:
        """Get the first complete skill invocation text"""
        return self.TOKEN_TOOL_CALL + buffer.split(self.TOKEN_TOOL_CALL)[-1]

    def _complete_skill_call(self, buffer: str) -> Optional[tuple]:
        """Extract a complete skill call from the buffer"""
        from dolphin.core.parser.parser import params_extract

        token = self.TOKEN_TOOL_CALL
        first_token_pos = buffer.find(token)
        if first_token_pos == -1:
            return None

        start_pos = first_token_pos + len(token)
        colon_pos = buffer.find(":", start_pos)
        if colon_pos == -1:
            return None

        skill_name = buffer[start_pos:colon_pos].strip()
        params_start = colon_pos + 1

        while params_start < len(buffer) and buffer[params_start].isspace():
            params_start += 1

        if params_start >= len(buffer):
            return None

        if buffer[params_start] == "{":
            bracket_count = 0
            params_end = params_start
            in_string = False
            escape_next = False

            for i in range(params_start, len(buffer)):
                char = buffer[i]

                if escape_next:
                    escape_next = False
                    continue

                if char == "\\":
                    escape_next = True
                    continue

                if char == '"' and not escape_next:
                    in_string = not in_string
                    continue

                if not in_string:
                    if char == "{":
                        bracket_count += 1
                    elif char == "}":
                        bracket_count -= 1
                        if bracket_count == 0:
                            params_end = i + 1
                            break

            if bracket_count == 0:
                params_content = buffer[params_start:params_end]
                parsed_params = params_extract(params_content)
                return (skill_name, parsed_params)

        return None

class ToolCallStrategy(ExploreStrategy):
    """Tool Call mode strategy implementation

    Tool calling method: use the LLM's native tool_call capability
    """

    def __init__(self, tools_format: str = "medium"):
        super().__init__()
        self.tools_format = tools_format

    def make_system_message(
        self,
        skillkit: Skillkit,
        system_prompt: str,
        tools_format: str = "medium"
    ) -> str:
        """Build the system message for Tool Call mode.

        Includes:
        - Goals and tool descriptions
        - Metadata prompt from skillkits (e.g., ResourceSkillkit Level 1)
        - User-provided system prompt
        """
        role_format = """
## Goals:
- First think carefully about and analyze the user's question, then decide whether to answer it yourself or handle it with a tool; always think carefully before calling a tool. The tools listed under tools are all the tools available to you.

## Available Tools:
{tools}

### Tools Usage Guidelines:
- Read each tool's description and parameter requirements carefully
- Choose the most suitable tool for the specific needs of the question
- Make sure the parameters are complete and correct before calling a tool
- If unsure how a tool works, try a simple call first to find out

{metadata_prompt}
{system_prompt}
"""

        # Replace tools description
        if skillkit is not None and not skillkit.isEmpty():
            tools_description = skillkit.getFormattedToolsDescription(tools_format)
            role_format = role_format.replace(r"{tools}", tools_description)
        else:
            role_format = role_format.replace(
                r"{tools}", "The user has not configured any tools; you can only answer the question yourself!"
            )

        # Inject metadata prompt from skillkits via skill.owner_skillkit
        metadata_prompt = Skillkit.collect_metadata_from_skills(skillkit)
        role_format = role_format.replace(r"{metadata_prompt}", metadata_prompt)

        # Replace user system prompt
        if not system_prompt or len(system_prompt.strip()) == 0:
            role_format = role_format.replace(r"{system_prompt}", "")
        else:
            role_format = role_format.replace(r"{system_prompt}", system_prompt)

        return role_format

    def get_llm_params(
        self,
        messages: Messages,
        model: str,
        skillkit: Skillkit,
        tool_choice: Optional[str] = None,
        no_cache: bool = False,
    ) -> Dict[str, Any]:
        """Includes the tools parameter and an optional tool_choice"""
        llm_params = {
            "messages": messages,
            "model": model,
            "no_cache": no_cache,
            "tools": skillkit.getSkillsSchema() if skillkit and not skillkit.isEmpty() else [],
        }

        if tool_choice:
            llm_params["tool_choice"] = tool_choice

        return llm_params

    def detect_tool_call(
        self,
        stream_item: StreamItem,
        context: Context
    ) -> Optional[ToolCall]:
        """Detect a tool call in a Tool Call mode response"""
        # Use has_tool_call() instead of has_complete_tool_call() to stay consistent with has_valid_tool_call().
        # This way a tool call is detected even if tool_args have not been fully received yet (args fall back to an empty dict).
        if not stream_item.has_tool_call():
            return None

        tool_call_id = (
            stream_item.tool_call_id
            or f"{TOOL_CALL_ID_PREFIX}{stream_item.tool_name}_{id(stream_item) % 10000}"
        )

        return ToolCall(
            id=tool_call_id,
            name=stream_item.tool_name,
            arguments=stream_item.tool_args or {},
            raw_text=None
        )

    def has_valid_tool_call(
        self,
        stream_item: StreamItem,
        context: Context
    ) -> bool:
        """Check whether a Tool Call mode response contains a valid tool call"""
        return stream_item.has_tool_call()

    def get_tool_call_content(
        self,
        stream_item: StreamItem,
        tool_call: ToolCall
    ) -> str:
        """Return the complete answer"""
        return stream_item.answer or ""

    def detect_tool_calls(
        self,
        stream_item: StreamItem,
        context: Context
    ) -> List[ToolCall]:
        """Detect multiple tool calls in a Tool Call mode response.

        Overrides the base class to handle multiple tool calls from the
        StreamItem.tool_calls list.

        Note: Only returns tool calls whose arguments were successfully
        parsed. Logs a warning for tool calls with unparseable arguments once
        the stream is complete.

        Args:
            stream_item: The streaming response item from the LLM
            context: The execution context

        Returns:
            List of ToolCall objects with valid arguments.
        """
        tool_call_infos = stream_item.get_tool_calls()
        if not tool_call_infos:
            return []

        result = []
        for info in tool_call_infos:
            # Only include tool calls that are complete (arguments successfully parsed).
            # The is_complete field is set during parse_from_chunk when JSON parsing succeeds.
            if info.is_complete and info.arguments is not None:
                result.append(ToolCall(
                    id=info.id,
                    name=info.name,
                    arguments=info.arguments,
                    raw_text=None
                ))
            elif stream_item.finish_reason is not None and not info.is_complete:
                # Stream has ended but the arguments failed to parse; log a warning
                raw_arguments = info.raw_arguments
                if len(raw_arguments) > 200:
                    raw_arguments = raw_arguments[:200] + "..."
                context.warn(
                    f"Tool call {info.name} (id={info.id}) skipped: "
                    f"stream ended but JSON arguments are incomplete or invalid. "
                    f"Raw arguments: '{raw_arguments}'"
                )

        return result
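
Note: the following sketches are editorial illustrations, not part of the package. This first one shows how the "=>#" wire format used by PromptStrategy can be parsed. It mirrors the brace-matching logic of PromptStrategy._complete_skill_call above, but substitutes json.loads for dolphin's params_extract; parse_prompt_tool_call is a hypothetical helper written for illustration only.

import json
from typing import Any, Dict, Optional, Tuple

TOKEN = "=>#"  # same marker as PromptStrategy.TOKEN_TOOL_CALL

def parse_prompt_tool_call(answer: str) -> Optional[Tuple[str, Dict[str, Any]]]:
    """Extract (skill_name, arguments) from an answer such as
    'Let me check. =>#web_search: {"query": "dolphin sdk"}'."""
    pos = answer.find(TOKEN)
    if pos == -1:
        return None
    colon = answer.find(":", pos + len(TOKEN))
    if colon == -1:
        return None
    name = answer[pos + len(TOKEN):colon].strip()
    start = answer.find("{", colon)
    if start == -1:
        return None
    depth, in_str, esc = 0, False, False
    for i in range(start, len(answer)):
        ch = answer[i]
        if esc:
            esc = False
            continue
        if ch == "\\":
            esc = True
            continue
        if ch == '"':
            in_str = not in_str
            continue
        if not in_str:
            if ch == "{":
                depth += 1
            elif ch == "}":
                depth -= 1
                if depth == 0:
                    # Balanced object found: parse it as strict JSON
                    return name, json.loads(answer[start:i + 1])
    return None  # call marker seen, but arguments still incomplete (mid-stream)

print(parse_prompt_tool_call('Thinking... =>#web_search: {"query": "dolphin sdk"}'))
# ('web_search', {'query': 'dolphin sdk'})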
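
For Tool Call mode, append_tool_calls_message packs every parallel call into a single assistant message. A sketch of the resulting OpenAI-style payload, with tool names, ids, and arguments invented for illustration:

import json

tool_calls = [
    {
        "id": "call_1",
        "type": "function",
        "function": {
            "name": "web_search",
            "arguments": json.dumps({"query": "dolphin sdk"}, ensure_ascii=False),
        },
    },
    {
        "id": "call_2",
        "type": "function",
        "function": {
            "name": "read_file",
            "arguments": json.dumps({"path": "README.md"}, ensure_ascii=False),
        },
    },
]
assistant_message = {"role": "assistant", "content": "", "tool_calls": tool_calls}
# Each tool result is then appended as a {"role": "tool", "tool_call_id": ..., "content": ...}
# message, which is what append_tool_response_message does in Tool Call mode.
print(json.dumps(assistant_message, indent=2))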