kweaver-dolphin 0.1.0__py3-none-any.whl
This diff represents the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- DolphinLanguageSDK/__init__.py +58 -0
- dolphin/__init__.py +62 -0
- dolphin/cli/__init__.py +20 -0
- dolphin/cli/args/__init__.py +9 -0
- dolphin/cli/args/parser.py +567 -0
- dolphin/cli/builtin_agents/__init__.py +22 -0
- dolphin/cli/commands/__init__.py +4 -0
- dolphin/cli/interrupt/__init__.py +8 -0
- dolphin/cli/interrupt/handler.py +205 -0
- dolphin/cli/interrupt/keyboard.py +82 -0
- dolphin/cli/main.py +49 -0
- dolphin/cli/multimodal/__init__.py +34 -0
- dolphin/cli/multimodal/clipboard.py +327 -0
- dolphin/cli/multimodal/handler.py +249 -0
- dolphin/cli/multimodal/image_processor.py +214 -0
- dolphin/cli/multimodal/input_parser.py +149 -0
- dolphin/cli/runner/__init__.py +8 -0
- dolphin/cli/runner/runner.py +989 -0
- dolphin/cli/ui/__init__.py +10 -0
- dolphin/cli/ui/console.py +2795 -0
- dolphin/cli/ui/input.py +340 -0
- dolphin/cli/ui/layout.py +425 -0
- dolphin/cli/ui/stream_renderer.py +302 -0
- dolphin/cli/utils/__init__.py +8 -0
- dolphin/cli/utils/helpers.py +135 -0
- dolphin/cli/utils/version.py +49 -0
- dolphin/core/__init__.py +107 -0
- dolphin/core/agent/__init__.py +10 -0
- dolphin/core/agent/agent_state.py +69 -0
- dolphin/core/agent/base_agent.py +970 -0
- dolphin/core/code_block/__init__.py +0 -0
- dolphin/core/code_block/agent_init_block.py +0 -0
- dolphin/core/code_block/assign_block.py +98 -0
- dolphin/core/code_block/basic_code_block.py +1865 -0
- dolphin/core/code_block/explore_block.py +1327 -0
- dolphin/core/code_block/explore_block_v2.py +712 -0
- dolphin/core/code_block/explore_strategy.py +672 -0
- dolphin/core/code_block/judge_block.py +220 -0
- dolphin/core/code_block/prompt_block.py +32 -0
- dolphin/core/code_block/skill_call_deduplicator.py +291 -0
- dolphin/core/code_block/tool_block.py +129 -0
- dolphin/core/common/__init__.py +17 -0
- dolphin/core/common/constants.py +176 -0
- dolphin/core/common/enums.py +1173 -0
- dolphin/core/common/exceptions.py +133 -0
- dolphin/core/common/multimodal.py +539 -0
- dolphin/core/common/object_type.py +165 -0
- dolphin/core/common/output_format.py +432 -0
- dolphin/core/common/types.py +36 -0
- dolphin/core/config/__init__.py +16 -0
- dolphin/core/config/global_config.py +1289 -0
- dolphin/core/config/ontology_config.py +133 -0
- dolphin/core/context/__init__.py +12 -0
- dolphin/core/context/context.py +1580 -0
- dolphin/core/context/context_manager.py +161 -0
- dolphin/core/context/var_output.py +82 -0
- dolphin/core/context/variable_pool.py +356 -0
- dolphin/core/context_engineer/__init__.py +41 -0
- dolphin/core/context_engineer/config/__init__.py +5 -0
- dolphin/core/context_engineer/config/settings.py +402 -0
- dolphin/core/context_engineer/core/__init__.py +7 -0
- dolphin/core/context_engineer/core/budget_manager.py +327 -0
- dolphin/core/context_engineer/core/context_assembler.py +583 -0
- dolphin/core/context_engineer/core/context_manager.py +637 -0
- dolphin/core/context_engineer/core/tokenizer_service.py +260 -0
- dolphin/core/context_engineer/example/incremental_example.py +267 -0
- dolphin/core/context_engineer/example/traditional_example.py +334 -0
- dolphin/core/context_engineer/services/__init__.py +5 -0
- dolphin/core/context_engineer/services/compressor.py +399 -0
- dolphin/core/context_engineer/utils/__init__.py +6 -0
- dolphin/core/context_engineer/utils/context_utils.py +441 -0
- dolphin/core/context_engineer/utils/message_formatter.py +270 -0
- dolphin/core/context_engineer/utils/token_utils.py +139 -0
- dolphin/core/coroutine/__init__.py +15 -0
- dolphin/core/coroutine/context_snapshot.py +154 -0
- dolphin/core/coroutine/context_snapshot_profile.py +922 -0
- dolphin/core/coroutine/context_snapshot_store.py +268 -0
- dolphin/core/coroutine/execution_frame.py +145 -0
- dolphin/core/coroutine/execution_state_registry.py +161 -0
- dolphin/core/coroutine/resume_handle.py +101 -0
- dolphin/core/coroutine/step_result.py +101 -0
- dolphin/core/executor/__init__.py +18 -0
- dolphin/core/executor/debug_controller.py +630 -0
- dolphin/core/executor/dolphin_executor.py +1063 -0
- dolphin/core/executor/executor.py +624 -0
- dolphin/core/flags/__init__.py +27 -0
- dolphin/core/flags/definitions.py +49 -0
- dolphin/core/flags/manager.py +113 -0
- dolphin/core/hook/__init__.py +95 -0
- dolphin/core/hook/expression_evaluator.py +499 -0
- dolphin/core/hook/hook_dispatcher.py +380 -0
- dolphin/core/hook/hook_types.py +248 -0
- dolphin/core/hook/isolated_variable_pool.py +284 -0
- dolphin/core/interfaces.py +53 -0
- dolphin/core/llm/__init__.py +0 -0
- dolphin/core/llm/llm.py +495 -0
- dolphin/core/llm/llm_call.py +100 -0
- dolphin/core/llm/llm_client.py +1285 -0
- dolphin/core/llm/message_sanitizer.py +120 -0
- dolphin/core/logging/__init__.py +20 -0
- dolphin/core/logging/logger.py +526 -0
- dolphin/core/message/__init__.py +8 -0
- dolphin/core/message/compressor.py +749 -0
- dolphin/core/parser/__init__.py +8 -0
- dolphin/core/parser/parser.py +405 -0
- dolphin/core/runtime/__init__.py +10 -0
- dolphin/core/runtime/runtime_graph.py +926 -0
- dolphin/core/runtime/runtime_instance.py +446 -0
- dolphin/core/skill/__init__.py +14 -0
- dolphin/core/skill/context_retention.py +157 -0
- dolphin/core/skill/skill_function.py +686 -0
- dolphin/core/skill/skill_matcher.py +282 -0
- dolphin/core/skill/skillkit.py +700 -0
- dolphin/core/skill/skillset.py +72 -0
- dolphin/core/trajectory/__init__.py +10 -0
- dolphin/core/trajectory/recorder.py +189 -0
- dolphin/core/trajectory/trajectory.py +522 -0
- dolphin/core/utils/__init__.py +9 -0
- dolphin/core/utils/cache_kv.py +212 -0
- dolphin/core/utils/tools.py +340 -0
- dolphin/lib/__init__.py +93 -0
- dolphin/lib/debug/__init__.py +8 -0
- dolphin/lib/debug/visualizer.py +409 -0
- dolphin/lib/memory/__init__.py +28 -0
- dolphin/lib/memory/async_processor.py +220 -0
- dolphin/lib/memory/llm_calls.py +195 -0
- dolphin/lib/memory/manager.py +78 -0
- dolphin/lib/memory/sandbox.py +46 -0
- dolphin/lib/memory/storage.py +245 -0
- dolphin/lib/memory/utils.py +51 -0
- dolphin/lib/ontology/__init__.py +12 -0
- dolphin/lib/ontology/basic/__init__.py +0 -0
- dolphin/lib/ontology/basic/base.py +102 -0
- dolphin/lib/ontology/basic/concept.py +130 -0
- dolphin/lib/ontology/basic/object.py +11 -0
- dolphin/lib/ontology/basic/relation.py +63 -0
- dolphin/lib/ontology/datasource/__init__.py +27 -0
- dolphin/lib/ontology/datasource/datasource.py +66 -0
- dolphin/lib/ontology/datasource/oracle_datasource.py +338 -0
- dolphin/lib/ontology/datasource/sql.py +845 -0
- dolphin/lib/ontology/mapping.py +177 -0
- dolphin/lib/ontology/ontology.py +733 -0
- dolphin/lib/ontology/ontology_context.py +16 -0
- dolphin/lib/ontology/ontology_manager.py +107 -0
- dolphin/lib/skill_results/__init__.py +31 -0
- dolphin/lib/skill_results/cache_backend.py +559 -0
- dolphin/lib/skill_results/result_processor.py +181 -0
- dolphin/lib/skill_results/result_reference.py +179 -0
- dolphin/lib/skill_results/skillkit_hook.py +324 -0
- dolphin/lib/skill_results/strategies.py +328 -0
- dolphin/lib/skill_results/strategy_registry.py +150 -0
- dolphin/lib/skillkits/__init__.py +44 -0
- dolphin/lib/skillkits/agent_skillkit.py +155 -0
- dolphin/lib/skillkits/cognitive_skillkit.py +82 -0
- dolphin/lib/skillkits/env_skillkit.py +250 -0
- dolphin/lib/skillkits/mcp_adapter.py +616 -0
- dolphin/lib/skillkits/mcp_skillkit.py +771 -0
- dolphin/lib/skillkits/memory_skillkit.py +650 -0
- dolphin/lib/skillkits/noop_skillkit.py +31 -0
- dolphin/lib/skillkits/ontology_skillkit.py +89 -0
- dolphin/lib/skillkits/plan_act_skillkit.py +452 -0
- dolphin/lib/skillkits/resource/__init__.py +52 -0
- dolphin/lib/skillkits/resource/models/__init__.py +6 -0
- dolphin/lib/skillkits/resource/models/skill_config.py +109 -0
- dolphin/lib/skillkits/resource/models/skill_meta.py +127 -0
- dolphin/lib/skillkits/resource/resource_skillkit.py +393 -0
- dolphin/lib/skillkits/resource/skill_cache.py +215 -0
- dolphin/lib/skillkits/resource/skill_loader.py +395 -0
- dolphin/lib/skillkits/resource/skill_validator.py +406 -0
- dolphin/lib/skillkits/resource_skillkit.py +11 -0
- dolphin/lib/skillkits/search_skillkit.py +163 -0
- dolphin/lib/skillkits/sql_skillkit.py +274 -0
- dolphin/lib/skillkits/system_skillkit.py +509 -0
- dolphin/lib/skillkits/vm_skillkit.py +65 -0
- dolphin/lib/utils/__init__.py +9 -0
- dolphin/lib/utils/data_process.py +207 -0
- dolphin/lib/utils/handle_progress.py +178 -0
- dolphin/lib/utils/security.py +139 -0
- dolphin/lib/utils/text_retrieval.py +462 -0
- dolphin/lib/vm/__init__.py +11 -0
- dolphin/lib/vm/env_executor.py +895 -0
- dolphin/lib/vm/python_session_manager.py +453 -0
- dolphin/lib/vm/vm.py +610 -0
- dolphin/sdk/__init__.py +60 -0
- dolphin/sdk/agent/__init__.py +12 -0
- dolphin/sdk/agent/agent_factory.py +236 -0
- dolphin/sdk/agent/dolphin_agent.py +1106 -0
- dolphin/sdk/api/__init__.py +4 -0
- dolphin/sdk/runtime/__init__.py +8 -0
- dolphin/sdk/runtime/env.py +363 -0
- dolphin/sdk/skill/__init__.py +10 -0
- dolphin/sdk/skill/global_skills.py +706 -0
- dolphin/sdk/skill/traditional_toolkit.py +260 -0
- kweaver_dolphin-0.1.0.dist-info/METADATA +521 -0
- kweaver_dolphin-0.1.0.dist-info/RECORD +199 -0
- kweaver_dolphin-0.1.0.dist-info/WHEEL +5 -0
- kweaver_dolphin-0.1.0.dist-info/entry_points.txt +27 -0
- kweaver_dolphin-0.1.0.dist-info/licenses/LICENSE.txt +201 -0
- kweaver_dolphin-0.1.0.dist-info/top_level.txt +2 -0
@@ -0,0 +1,712 @@
from __future__ import annotations

import json
import traceback
from typing import Optional, AsyncGenerator, Dict, Any
from dolphin.core.code_block.basic_code_block import BasicCodeBlock
from dolphin.core.common.enums import (
    CategoryBlock,
    MessageRole,
    Messages,
    PlainMessages,
    TypeStage,
    StreamItem,
)
from dolphin.core.common.constants import (
    MAX_SKILL_CALL_TIMES,
    get_msg_duplicate_skill_call,
)
from dolphin.core.context.context import Context
from dolphin.core.context_engineer.config.settings import BuildInBucket
from dolphin.core.llm.llm_client import LLMClient
from dolphin.core.logging.logger import console, console_skill_response, get_logger
from dolphin.lib.skillkits.cognitive_skillkit import CognitiveSkillkit
from dolphin.core.utils.tools import ToolInterrupt
from dolphin.core.common.types import SourceType
from dolphin.lib.skillkits.system_skillkit import SystemFunctions

logger = get_logger("code_block.explore_block_v2")


class DeduplicatorSkillCall:
    MAX_DUPLICATE_COUNT = 5

    def __init__(self):
        # Track call counts and cached results in dicts for fast duplicate checks
        self.skillcalls = {}
        self.call_results = {}
        # Cache the string representation of skill calls to avoid redundant serialization.
        self._call_key_cache = {}

    def clear(self):
        """Clear all cached data"""
        self.skillcalls.clear()
        self.call_results.clear()
        self._call_key_cache.clear()

    def _get_call_key(self, skill_call):
        """Get the standardized string representation of a skill call (with caching)"""
        # Use id() as the cache key, since skill_call is typically an unhashable dict
        cache_key = id(skill_call)
        if cache_key in self._call_key_cache:
            return self._call_key_cache[cache_key]

        call_key = json.dumps(skill_call, sort_keys=True, ensure_ascii=False)
        self._call_key_cache[cache_key] = call_key
        return call_key

    def add(self, skill_call, result=None):
        """Record one invocation of a skill call"""
        call_key = self._get_call_key(skill_call)
        self.skillcalls[call_key] = self.skillcalls.get(call_key, 0) + 1
        if result is not None:
            self.call_results[call_key] = result

    def is_duplicate(self, skill_call):
        """Check whether this call has been repeated too many times"""
        call_key = self._get_call_key(skill_call)

        # For certain tools, allow re-invocation if the result of the previous call is invalid.
        if self._should_allow_retry(skill_call, call_key):
            return False

        return self.skillcalls.get(call_key, 0) >= self.MAX_DUPLICATE_COUNT

    def _should_allow_retry(self, skill_call, call_key):
        """Determine whether a repeated skill invocation should be allowed"""
        skill_name = skill_call.get("name", "")

        # A call without arguments is always allowed to be retried
        if not skill_call.get("arguments"):
            return True

        # Allow retries if previous results are invalid for tools such as browser_snapshot
        if "snapshot" in skill_name.lower():
            previous_result = self.call_results.get(call_key)
            if previous_result is not None:
                result_str = str(previous_result).strip().lower()
                # If the previous result is too short or contains error messages, retries are allowed.
                return (
                    len(result_str) < 50
                    or "about:blank" in result_str
                    or "error" in result_str
                    or "empty" in result_str
                )

        return False

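# A minimal sketch of how the deduplicator above behaves, assuming skill calls
# are plain dicts of the shape {"name": ..., "arguments": {...}} (the shape
# used throughout this module); the tool names here are hypothetical:
#
#   dedup = DeduplicatorSkillCall()
#   call = {"name": "web_search", "arguments": {"query": "dolphins"}}
#   for _ in range(DeduplicatorSkillCall.MAX_DUPLICATE_COUNT):
#       dedup.add(call)
#   assert dedup.is_duplicate(call)  # count reached MAX_DUPLICATE_COUNT
#   assert not dedup.is_duplicate({"name": "noop", "arguments": {}})  # argument-less calls may always retry
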
class ExploreBlockV2(BasicCodeBlock):
    def __init__(
        self,
        context: Context,
        debug_infos: Optional[dict] = None,
        tools_format: str = "medium",
    ):
        super().__init__(context)

        self.llm_client = LLMClient(self.context)
        self.debug_infos = debug_infos
        self.times = 0
        self.deduplicator_skillcall = DeduplicatorSkillCall()
        # Tools description format: "concise", "medium", or "detailed"
        self.tools_format = tools_format
        # Mark whether exploration should be stopped (set to True when there is no tool call)
        self.should_stop_exploration = False
        # Whether to enable skill call deduplication (consistent with the semantics of ExploreBlock, enabled by default)
        self.enable_skill_deduplicator = getattr(
            self, "enable_skill_deduplicator", True
        )

    async def execute(
        self,
        content,
        category: CategoryBlock = CategoryBlock.EXPLORE,
        replace_variables=True,
    ) -> AsyncGenerator[Dict[str, Any], None]:
        # Call the parent class's execute method
        async for _ in super().execute(content, category, replace_variables):
            pass

        # Compatible with older versions, output the entire progress content
        self.recorder.set_output_dump_process(True) if self.recorder else None

        self.block_start_log("explore")

        # reset messages first, then build init messages to preserve system and history
        self._make_init_messages()

        # Consume the async generator to execute the logic
        async for ret in self._execute_generator():
            yield ret

        # Update history and cleanup buckets after execution
        # Uses the base class implementation in BasicCodeBlock
        self._update_history_and_cleanup()

    async def _execute_generator(self):
        """
        Actual implementation that yields results
        """
        # has_add tracks whether the ">>" assign type has appended its first chunk
        has_add = False if self.assign_type == ">>" else None

        # Use loops instead of recursion to avoid stack overflow
        while True:
            async for ret in self._explore_once(no_cache=True):
                if self.assign_type == ">>":
                    if has_add:
                        self.context.update_var_output(
                            self.output_var, ret, SourceType.EXPLORE
                        )
                    else:
                        self.context.append_var_output(
                            self.output_var, ret, SourceType.EXPLORE
                        )
                        has_add = True
                elif self.assign_type == "->":
                    self.context.set_var_output(
                        self.output_var, ret, SourceType.EXPLORE
                    )
                # If assign_type is another value, do nothing
                yield ret

            # Check whether to continue the next exploration
            if not self._should_continue_explore():
                break

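    # A short sketch of the assign-type semantics implemented above, with
    # hypothetical chunks c1, c2, c3 streaming out of _explore_once:
    #
    #   assign_type == "->": set_var_output on every chunk, so the output
    #                        variable always holds the latest state (c1, then
    #                        c2, then c3).
    #   assign_type == ">>": append_var_output once for the first chunk, then
    #                        update_var_output for later chunks, accumulating a
    #                        new entry per block run while keeping it current.
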
    def _make_system_message(self):
        """Build system message for Tool Call mode.

        Includes:
        - Goals and tool descriptions
        - Metadata prompt from skillkits (e.g., ResourceSkillkit Level 1)
        - User-provided system prompt
        """
        role_format = """
## Goals:
- 你需要:先仔细思考和分析用户的问题,然后决定由自己回答问题还是使用工具来处理问题,务必在调用工具前仔细思考。tools中的工具就是你可以使用的全部工具。

## Available Tools:
{tools}

### Tools Usage Guidelines:
- 仔细阅读每个工具的描述和参数要求
- 根据问题的具体需求选择最合适的工具
- 在调用工具前确保参数完整和正确
- 如果不确定工具用法,可以先尝试简单的调用来了解

{metadata_prompt}
{system_prompt}
"""

        skillkit = self.get_skillkit()
        if skillkit is not None and not skillkit.isEmpty():
            # Use the configured tools format (concise/medium/detailed)
            tools_description = skillkit.getFormattedToolsDescription(self.tools_format)
            role_format = role_format.replace(r"{tools}", tools_description)
        else:
            role_format = role_format.replace(
                r"{tools}", "用户没有配置工具,你只能自己回答问题!"
            )

        # Inject metadata prompt from skillkits via skill.owner_skillkit
        from dolphin.core.skill.skillkit import Skillkit
        metadata_prompt = Skillkit.collect_metadata_from_skills(skillkit)
        role_format = role_format.replace(r"{metadata_prompt}", metadata_prompt)

        # Replace user system prompt
        if not self.system_prompt or len(self.system_prompt.strip()) == 0:
            role_format = role_format.replace(r"{system_prompt}", "")
        else:
            role_format = role_format.replace(r"{system_prompt}", self.system_prompt)

        return role_format

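    # Note on the template handling above: placeholders are filled with
    # str.replace() rather than str.format(), so literal braces elsewhere in
    # the template or in tool descriptions can never trip format-field parsing;
    # the r"{tools}" raw-string prefix is purely cosmetic, since the literal
    # contains no backslashes. A tiny illustration of the failure mode avoided:
    #
    #   tmpl = 'Tools:\n{tools}\nArgs look like {"q": "..."}'
    #   tmpl.format(tools="...")          # raises: {"q": ...} parses as a format field
    #   tmpl.replace("{tools}", "...")    # safe: only the exact marker is replaced
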
    def _make_history_messages(self):
        if isinstance(self.history, bool):
            use_history_flag = self.history
        else:
            use_history_flag = self.history.lower() == "true"

        if use_history_flag:
            history_messages = self.context.get_history_messages()
            return history_messages or Messages()
        return None

    def _make_init_messages(self):
        """Build initialization message"""
        system_message = self._make_system_message()
        history_messages = self._make_history_messages()
        self._add_messages_to_context_manager(system_message, history_messages)

    def _add_messages_to_context_manager(
        self, system_message: str, history_messages: Messages
    ):
        if len(system_message.strip()) > 0 and self.context.context_manager:
            self.context.add_bucket(
                BuildInBucket.SYSTEM.value,
                system_message,
                message_role=MessageRole.SYSTEM,
            )

        if self.content and self.context.context_manager:
            self.context.add_bucket(
                BuildInBucket.QUERY.value,
                self.content,
            )

        if (
            self.history
            and history_messages is not None
            and not history_messages.empty()
            and self.context.context_manager
        ):
            self.context.set_history_bucket(history_messages)

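    # Sketch of the context layout the method above assembles, assuming the
    # context_manager renders buckets in its configured order (bucket names
    # from BuildInBucket; rendering itself lives in dolphin.core.context_engineer):
    #
    #   SYSTEM bucket  -> one system-role message (goals + tools + user system prompt)
    #   history bucket -> prior conversation turns, when `history` is enabled
    #   QUERY bucket   -> the current block content as the user query
    #   SCRATCHPAD     -> filled later with assistant tool calls and tool responses
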
    async def _explore_once(self, no_cache: bool = False):
        """Perform one exploration step to avoid recursive calls"""

        self.context.debug(
            f"explore[{self.output_var}] messages[{self.context.get_messages().str_summary()}] length[{self.context.get_messages().length()}]"
        )

        # Check if there is a tool call for interrupt recovery
        if self._has_pending_tool_call():
            async for ret in self._handle_resumed_tool_call():
                yield ret
        else:
            async for ret in self._handle_new_tool_call(no_cache):
                yield ret

    def _has_pending_tool_call(self) -> bool:
        """Check if there are pending tool calls"""
        intervention_tmp_key = "intervention_explore_block_vars"
        return (
            intervention_tmp_key in self.context.get_all_variables().keys()
            and "tool" in self.context.get_all_variables().keys()
        )

    async def _handle_resumed_tool_call(self):
        """Handle a tool call restored after interrupt recovery"""
        intervention_tmp_key = "intervention_explore_block_vars"

        # Get the content of saved temporary variables
        intervention_vars = self.context.get_var_value(intervention_tmp_key)
        self.context.delete_variable(intervention_tmp_key)

        # restore the complete message context before tool execution
        saved_messages = intervention_vars.get("prompt")
        if saved_messages is not None:
            msgs = Messages()
            msgs.extend_plain_messages(saved_messages)
            self.context.set_messages(msgs)

        input_dict = self.context.get_var_value("tool")
        function_name = input_dict["tool_name"]
        raw_tool_args = input_dict["tool_args"]
        function_params_json = {arg["key"]: arg["value"] for arg in raw_tool_args}

        (
            self.recorder.update(
                stage=TypeStage.SKILL,
                source_type=SourceType.EXPLORE,
                skill_name=function_name,
                skill_type=self.context.get_skill_type(function_name),
                skill_args=function_params_json,
            )
            if self.recorder
            else None
        )
        self.context.delete_variable("tool")

        return_answer = {}
        try:
            props = {"intervention": False}
            have_answer = False

            async for resp in self.skill_run(
                skill_name=function_name,
                source_type=SourceType.EXPLORE,
                skill_params_json=function_params_json,
                props=props,
            ):
                if (
                    isinstance(resp, dict)
                    and "answer" in resp
                    and isinstance(resp["answer"], dict)
                    and "answer" in resp["answer"]
                ):
                    return_answer["answer"] = resp.get("answer", "").get("answer", "")
                    return_answer["think"] = resp.get("answer", "").get("think", "")
                    if "block_answer" in resp:
                        return_answer["block_answer"] = resp.get("block_answer", "")
                else:
                    (
                        self.recorder.update(
                            item={"answer": resp, "block_answer": resp},
                            stage=TypeStage.SKILL,
                            source_type=SourceType.EXPLORE,
                            skill_name=function_name,
                            skill_type=self.context.get_skill_type(function_name),
                            skill_args=function_params_json,
                        )
                        if self.recorder
                        else None
                    )
                have_answer = True
                yield self.recorder.get_progress_answers() if self.recorder else None

            console_skill_response(
                skill_name=function_name,
                response=self.recorder.get_answer() if self.recorder else "",
                max_length=1024,
            )

            if not have_answer:
                (
                    self.recorder.update(
                        item=f"调用{function_name}工具时未正确返回结果,需要重新调用。",
                        source_type=SourceType.EXPLORE,
                    )
                    if self.recorder
                    else None
                )
        except ToolInterrupt as e:
            if "tool" in self.context.get_all_variables().keys():
                self.context.delete_variable("tool")
            yield self.recorder.get_progress_answers() if self.recorder else None
            raise e
        except Exception as e:
            logger.error(f"调用工具存在错误,错误类型: {type(e)}")
            logger.error(f"错误详细信息: {str(e)}")
            return_answer["think"] = (
                f"调用{function_name}工具时发生错误,需要重新调用。错误信息: {str(e)}"
            )
            return_answer["answer"] = (
                f"调用{function_name}工具时发生错误,需要重新调用。错误信息: {str(e)}"
            )

        return_answer["status"] = "completed"
        yield [return_answer]

        # append tool response message to maintain consistent message flow
        tool_response, metadata = self._process_skill_result_with_hook(function_name)

        if tool_response:
            # Extract tool_call_id from the restored messages
            tool_call_id = self._extract_tool_call_id()
            if not tool_call_id:
                tool_call_id = f"call_{function_name}_{self.times}"

            self._append_tool_message(tool_call_id, str(tool_response), metadata)

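    # Sketch of the interrupt/resume roundtrip handled above, assuming the
    # runtime persists context variables across the interruption:
    #
    #   1. _execute_tool_call saves {"prompt": <messages>, "tool_name": ...}
    #      under "intervention_explore_block_vars" before running a skill.
    #   2. If the skill raises ToolInterrupt, execution unwinds; the caller
    #      later supplies a "tool" variable shaped like
    #      {"tool_name": ..., "tool_args": [{"key": ..., "value": ...}, ...]}.
    #   3. On re-entry, _has_pending_tool_call() sees both variables, and
    #      _handle_resumed_tool_call() restores the saved messages, replays the
    #      call, and appends the tool response to keep the message flow consistent.
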
    async def _handle_new_tool_call(self, no_cache: bool):
        """Handle a new tool call"""
        # Get LLM messages
        llm_messages = self.context.context_manager.to_dph_messages()

        llm_params = {
            "messages": llm_messages,
            "model": self.model,
            "no_cache": no_cache,
            "tools": self.get_skillkit().getSkillsSchema(),
        }
        # propagate tool_choice if provided in params/block
        if getattr(self, "tool_choice", None):
            llm_params["tool_choice"] = self.tool_choice

        # Create stream renderer for live markdown (CLI layer)
        renderer = None
        on_chunk = None
        if self.context.is_cli_mode():
            try:
                from dolphin.cli.ui.stream_renderer import LiveStreamRenderer
                renderer = LiveStreamRenderer(verbose=self.context.is_verbose())
                renderer.start()
                on_chunk = renderer.on_chunk
            except ImportError:
                pass

        try:
            # Initialize the stream_item variable to avoid undefined errors
            stream_item = StreamItem()
            async for stream_item in self.llm_chat_stream(
                llm_params=llm_params,
                recorder=self.recorder,
                content=self.content if self.content else "",
                early_stop_on_tool_call=True,
                on_stream_chunk=on_chunk,
            ):
                if not stream_item.has_tool_call():
                    yield self.recorder.get_progress_answers() if self.recorder else None
                elif stream_item.has_complete_tool_call():
                    logger.debug(
                        f"explore[{self.output_var}] find skill call [{stream_item.tool_name}]"
                    )
                    break
        finally:
            if renderer:
                renderer.stop()

        # Removed extra newline - renderer.stop() already handles this

        if self.times >= MAX_SKILL_CALL_TIMES:
            self.context.warn(
                f"max skill call times reached {MAX_SKILL_CALL_TIMES} times, answer[{stream_item.to_dict()}]"
            )
        else:
            self.times += 1

        (
            self.recorder.update(
                item=stream_item,
                raw_output=stream_item.answer,
                is_completed=True,
                source_type=SourceType.EXPLORE,
            )
            if self.recorder
            else None
        )
        yield self.recorder.get_progress_answers() if self.recorder else None

        if not stream_item.has_tool_call():
            self._append_assistant_message(stream_item.answer)
            self.context.debug(f"no valid skill call, answer[{stream_item.answer}]")

            # Stop exploring as soon as one round produces no tool invocation
            self.should_stop_exploration = True
            self.context.debug("没有工具调用,停止探索")

            return

        # Add assistant message containing tool calls
        tool_call_id = f"call_{stream_item.tool_name}_{self.times}"
        tool_call_openai_format = [
            {
                "id": tool_call_id,
                "type": "function",
                "function": {
                    "name": stream_item.tool_name,
                    "arguments": (
                        json.dumps(stream_item.tool_args, ensure_ascii=False)
                        if stream_item.tool_args
                        else "{}"
                    ),
                },
            }
        ]

        tool_call = stream_item.get_tool_call()
        # When enable_skill_deduplicator is False, disable the deduplication logic and always treat calls as non-duplicates.
        if (not getattr(self, "enable_skill_deduplicator", True)) or (
            not self.deduplicator_skillcall.is_duplicate(tool_call)
        ):
            self._append_tool_call_message(
                stream_item, tool_call_openai_format
            )
            self.deduplicator_skillcall.add(tool_call)

            async for ret in self._execute_tool_call(stream_item, tool_call_id):
                yield ret
        else:
            await self._handle_duplicate_tool_call(tool_call, stream_item)

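    # Sketch of the OpenAI-style message pair appended per tool call (shapes
    # taken from tool_call_openai_format above and _append_tool_message below;
    # the tool name is hypothetical, and ids are synthesized as
    # f"call_{tool_name}_{times}"):
    #
    #   {"role": "assistant", "content": "<streamed reasoning>",
    #    "tool_calls": [{"id": "call_web_search_1", "type": "function",
    #                    "function": {"name": "web_search",
    #                                 "arguments": "{\"query\": \"...\"}"}}]}
    #   {"role": "tool", "tool_call_id": "call_web_search_1",
    #    "content": "<skill result, truncated to get_max_answer_len()>"}
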
    async def _execute_tool_call(self, stream_item, tool_call_id: str):
        """Execute a tool call"""
        intervention_tmp_key = "intervention_explore_block_vars"

        try:
            intervention_vars = {
                "prompt": self.context.get_messages().get_messages_as_dict(),
                "tool_name": stream_item.tool_name,
                "cur_llm_stream_answer": stream_item.answer,
                "all_answer": stream_item.answer,
            }

            self.context.set_variable(intervention_tmp_key, intervention_vars)

            async for resp in self.skill_run(
                source_type=SourceType.EXPLORE,
                skill_name=stream_item.tool_name,
                skill_params_json=(
                    stream_item.tool_args if stream_item.tool_args else {}
                ),
            ):
                yield self.recorder.get_progress_answers() if self.recorder else None

            self.deduplicator_skillcall.add(
                stream_item.get_tool_call(),
                self.recorder.get_answer() if self.recorder else None,
            )

            # Add tool response message
            tool_response, metadata = self._process_skill_result_with_hook(stream_item.tool_name)

            answer_content: str = (
                tool_response
                if tool_response is not None
                and not CognitiveSkillkit.is_cognitive_skill(stream_item.tool_name)
                else ""
            )

            if len(answer_content) > self.context.get_max_answer_len():
                answer_content = answer_content[
                    : self.context.get_max_answer_len()
                ] + "(... too long, truncated to {})".format(
                    self.context.get_max_answer_len()
                )

            self._append_tool_message(tool_call_id, answer_content, metadata)

        except ToolInterrupt as e:
            self._handle_tool_interrupt(e, stream_item.tool_name)
            raise e
        except Exception as e:
            self._handle_tool_execution_error(e, stream_item.tool_name)
            # Add a tool response message even if an error occurs (maintain context integrity)
            error_content = f"Tool execution error: {str(e)}"
            self._append_tool_message(tool_call_id, error_content, None)

    async def _handle_duplicate_tool_call(self, tool_call, stream_item):
        """Handle duplicate tool calls"""
        message = get_msg_duplicate_skill_call()
        self._append_assistant_message(message)

        (
            self.recorder.update(
                item={"answer": message, "think": ""},
                raw_output=stream_item.answer,
                source_type=SourceType.EXPLORE,
            )
            if self.recorder
            else None
        )
        self.context.warn(
            f"Duplicate skill call detected: {self.deduplicator_skillcall._get_call_key(tool_call)}"
        )

    def _handle_tool_interrupt(self, e: Exception, tool_name: str):
        """Handle tool interruptions"""
        self.context.info(f"tool interrupt in call {tool_name} tool")
        if "※tool" in self.context.get_all_variables().keys():
            self.context.delete_variable("※tool")

    def _handle_tool_execution_error(self, e: Exception, tool_name: str):
        """Handle tool execution errors"""
        error_trace = traceback.format_exc()
        self.context.error(
            f"error in call {tool_name} tool, error type: {type(e)}, error info: {str(e)}, error trace: {error_trace}"
        )

    def _should_continue_explore(self) -> bool:
        """Check whether to continue with the next exploration round.

        Termination conditions:
        1. Maximum number of tool calls reached
        2. Duplicate tool call detected
        3. A round completed without any tool call

        Returns:
            bool: True if exploration should continue, False otherwise
        """
        # 1. If the maximum number of calls has been reached, stop exploring
        if self.times >= MAX_SKILL_CALL_TIMES:
            return False

        # 2. Check for duplicate calls (effective only when the skill deduplicator is enabled)
        if getattr(self, "enable_skill_deduplicator", True):
            if self.deduplicator_skillcall.skillcalls:
                recent_calls = list(self.deduplicator_skillcall.skillcalls.values())
                if (
                    recent_calls
                    and max(recent_calls)
                    >= DeduplicatorSkillCall.MAX_DUPLICATE_COUNT
                ):
                    return False

        # 3. Stop exploring when a round produced no tool call.
        if self.should_stop_exploration:
            return False

        return True

    def _process_skill_result_with_hook(self, skill_name: str) -> tuple[str | None, dict]:
        """Handle skill results using skillkit_hook

        Args:
            skill_name: Name of the skill

        Returns:
            tuple[str | None, dict]: (processed result, metadata)
        """
        # Get the skill object
        skill = self.context.get_skill(skill_name)
        if not skill:
            skill = SystemFunctions.getSkill(skill_name)

        # Get the last stage as reference
        last_stage = self.recorder.getProgress().get_last_stage()
        reference = last_stage.get_raw_output() if last_stage else None
        # Handle results using skillkit_hook (handles dynamic tools automatically)
        if reference and self.skillkit_hook and self.context.has_skillkit_hook():
            # Use the hook to get context-optimized content
            content, metadata = self.skillkit_hook.on_before_send_to_context(
                reference_id=reference.reference_id,
                skill=skill,
                skillkit_name=type(skill.owner_skillkit).__name__ if skill.owner_skillkit else "",
                resource_skill_path=getattr(skill, 'resource_skill_path', None),
            )
            return content, metadata

        return self.recorder.getProgress().get_step_answers(), {}

    def _append_tool_message(
        self,
        tool_call_id: str,
        answer_content: str,
        metadata: Optional[dict] = None,
    ):
        """Add tool messages to the context uniformly"""
        scrapted_messages = Messages()
        scrapted_messages.add_tool_response_message(
            content=answer_content,
            tool_call_id=tool_call_id,
            metadata=metadata,
        )
        self.context.add_bucket(
            BuildInBucket.SCRATCHPAD.value,
            scrapted_messages,
        )

    def _append_tool_call_message(
        self,
        stream_item,
        tool_call_openai_format: list,
    ):
        """Add tool call messages to the context uniformly"""
        scrapted_messages = Messages()
        scrapted_messages.add_tool_call_message(
            content=stream_item.answer, tool_calls=tool_call_openai_format
        )
        self.context.add_bucket(
            BuildInBucket.SCRATCHPAD.value,
            scrapted_messages,
        )

    def _append_assistant_message(self, content: str):
        """Add an assistant message to the context uniformly"""
        scrapted_messages = Messages()
        scrapted_messages.add_message(content, MessageRole.ASSISTANT)
        self.context.add_bucket(
            BuildInBucket.SCRATCHPAD.value,
            scrapted_messages,
        )

    def _extract_tool_call_id(self) -> str | None:
        """Extract the tool call ID from the latest tool-call message"""
        messages_with_calls = self.context.get_messages_with_tool_calls()
        if messages_with_calls:
            last_call_msg = messages_with_calls[-1]
            if last_call_msg.tool_calls:
                return last_call_msg.tool_calls[0].get("id")
        return None
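
# A minimal, hypothetical driver for this block (how a Context is obtained
# varies by SDK entry point; see dolphin/sdk for the supported surface):
#
#   async def run_explore(context: Context, task: str):
#       block = ExploreBlockV2(context, tools_format="detailed")
#       async for progress in block.execute(task):
#           ...  # stream intermediate progress to the caller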