kweaver-dolphin 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- DolphinLanguageSDK/__init__.py +58 -0
- dolphin/__init__.py +62 -0
- dolphin/cli/__init__.py +20 -0
- dolphin/cli/args/__init__.py +9 -0
- dolphin/cli/args/parser.py +567 -0
- dolphin/cli/builtin_agents/__init__.py +22 -0
- dolphin/cli/commands/__init__.py +4 -0
- dolphin/cli/interrupt/__init__.py +8 -0
- dolphin/cli/interrupt/handler.py +205 -0
- dolphin/cli/interrupt/keyboard.py +82 -0
- dolphin/cli/main.py +49 -0
- dolphin/cli/multimodal/__init__.py +34 -0
- dolphin/cli/multimodal/clipboard.py +327 -0
- dolphin/cli/multimodal/handler.py +249 -0
- dolphin/cli/multimodal/image_processor.py +214 -0
- dolphin/cli/multimodal/input_parser.py +149 -0
- dolphin/cli/runner/__init__.py +8 -0
- dolphin/cli/runner/runner.py +989 -0
- dolphin/cli/ui/__init__.py +10 -0
- dolphin/cli/ui/console.py +2795 -0
- dolphin/cli/ui/input.py +340 -0
- dolphin/cli/ui/layout.py +425 -0
- dolphin/cli/ui/stream_renderer.py +302 -0
- dolphin/cli/utils/__init__.py +8 -0
- dolphin/cli/utils/helpers.py +135 -0
- dolphin/cli/utils/version.py +49 -0
- dolphin/core/__init__.py +107 -0
- dolphin/core/agent/__init__.py +10 -0
- dolphin/core/agent/agent_state.py +69 -0
- dolphin/core/agent/base_agent.py +970 -0
- dolphin/core/code_block/__init__.py +0 -0
- dolphin/core/code_block/agent_init_block.py +0 -0
- dolphin/core/code_block/assign_block.py +98 -0
- dolphin/core/code_block/basic_code_block.py +1865 -0
- dolphin/core/code_block/explore_block.py +1327 -0
- dolphin/core/code_block/explore_block_v2.py +712 -0
- dolphin/core/code_block/explore_strategy.py +672 -0
- dolphin/core/code_block/judge_block.py +220 -0
- dolphin/core/code_block/prompt_block.py +32 -0
- dolphin/core/code_block/skill_call_deduplicator.py +291 -0
- dolphin/core/code_block/tool_block.py +129 -0
- dolphin/core/common/__init__.py +17 -0
- dolphin/core/common/constants.py +176 -0
- dolphin/core/common/enums.py +1173 -0
- dolphin/core/common/exceptions.py +133 -0
- dolphin/core/common/multimodal.py +539 -0
- dolphin/core/common/object_type.py +165 -0
- dolphin/core/common/output_format.py +432 -0
- dolphin/core/common/types.py +36 -0
- dolphin/core/config/__init__.py +16 -0
- dolphin/core/config/global_config.py +1289 -0
- dolphin/core/config/ontology_config.py +133 -0
- dolphin/core/context/__init__.py +12 -0
- dolphin/core/context/context.py +1580 -0
- dolphin/core/context/context_manager.py +161 -0
- dolphin/core/context/var_output.py +82 -0
- dolphin/core/context/variable_pool.py +356 -0
- dolphin/core/context_engineer/__init__.py +41 -0
- dolphin/core/context_engineer/config/__init__.py +5 -0
- dolphin/core/context_engineer/config/settings.py +402 -0
- dolphin/core/context_engineer/core/__init__.py +7 -0
- dolphin/core/context_engineer/core/budget_manager.py +327 -0
- dolphin/core/context_engineer/core/context_assembler.py +583 -0
- dolphin/core/context_engineer/core/context_manager.py +637 -0
- dolphin/core/context_engineer/core/tokenizer_service.py +260 -0
- dolphin/core/context_engineer/example/incremental_example.py +267 -0
- dolphin/core/context_engineer/example/traditional_example.py +334 -0
- dolphin/core/context_engineer/services/__init__.py +5 -0
- dolphin/core/context_engineer/services/compressor.py +399 -0
- dolphin/core/context_engineer/utils/__init__.py +6 -0
- dolphin/core/context_engineer/utils/context_utils.py +441 -0
- dolphin/core/context_engineer/utils/message_formatter.py +270 -0
- dolphin/core/context_engineer/utils/token_utils.py +139 -0
- dolphin/core/coroutine/__init__.py +15 -0
- dolphin/core/coroutine/context_snapshot.py +154 -0
- dolphin/core/coroutine/context_snapshot_profile.py +922 -0
- dolphin/core/coroutine/context_snapshot_store.py +268 -0
- dolphin/core/coroutine/execution_frame.py +145 -0
- dolphin/core/coroutine/execution_state_registry.py +161 -0
- dolphin/core/coroutine/resume_handle.py +101 -0
- dolphin/core/coroutine/step_result.py +101 -0
- dolphin/core/executor/__init__.py +18 -0
- dolphin/core/executor/debug_controller.py +630 -0
- dolphin/core/executor/dolphin_executor.py +1063 -0
- dolphin/core/executor/executor.py +624 -0
- dolphin/core/flags/__init__.py +27 -0
- dolphin/core/flags/definitions.py +49 -0
- dolphin/core/flags/manager.py +113 -0
- dolphin/core/hook/__init__.py +95 -0
- dolphin/core/hook/expression_evaluator.py +499 -0
- dolphin/core/hook/hook_dispatcher.py +380 -0
- dolphin/core/hook/hook_types.py +248 -0
- dolphin/core/hook/isolated_variable_pool.py +284 -0
- dolphin/core/interfaces.py +53 -0
- dolphin/core/llm/__init__.py +0 -0
- dolphin/core/llm/llm.py +495 -0
- dolphin/core/llm/llm_call.py +100 -0
- dolphin/core/llm/llm_client.py +1285 -0
- dolphin/core/llm/message_sanitizer.py +120 -0
- dolphin/core/logging/__init__.py +20 -0
- dolphin/core/logging/logger.py +526 -0
- dolphin/core/message/__init__.py +8 -0
- dolphin/core/message/compressor.py +749 -0
- dolphin/core/parser/__init__.py +8 -0
- dolphin/core/parser/parser.py +405 -0
- dolphin/core/runtime/__init__.py +10 -0
- dolphin/core/runtime/runtime_graph.py +926 -0
- dolphin/core/runtime/runtime_instance.py +446 -0
- dolphin/core/skill/__init__.py +14 -0
- dolphin/core/skill/context_retention.py +157 -0
- dolphin/core/skill/skill_function.py +686 -0
- dolphin/core/skill/skill_matcher.py +282 -0
- dolphin/core/skill/skillkit.py +700 -0
- dolphin/core/skill/skillset.py +72 -0
- dolphin/core/trajectory/__init__.py +10 -0
- dolphin/core/trajectory/recorder.py +189 -0
- dolphin/core/trajectory/trajectory.py +522 -0
- dolphin/core/utils/__init__.py +9 -0
- dolphin/core/utils/cache_kv.py +212 -0
- dolphin/core/utils/tools.py +340 -0
- dolphin/lib/__init__.py +93 -0
- dolphin/lib/debug/__init__.py +8 -0
- dolphin/lib/debug/visualizer.py +409 -0
- dolphin/lib/memory/__init__.py +28 -0
- dolphin/lib/memory/async_processor.py +220 -0
- dolphin/lib/memory/llm_calls.py +195 -0
- dolphin/lib/memory/manager.py +78 -0
- dolphin/lib/memory/sandbox.py +46 -0
- dolphin/lib/memory/storage.py +245 -0
- dolphin/lib/memory/utils.py +51 -0
- dolphin/lib/ontology/__init__.py +12 -0
- dolphin/lib/ontology/basic/__init__.py +0 -0
- dolphin/lib/ontology/basic/base.py +102 -0
- dolphin/lib/ontology/basic/concept.py +130 -0
- dolphin/lib/ontology/basic/object.py +11 -0
- dolphin/lib/ontology/basic/relation.py +63 -0
- dolphin/lib/ontology/datasource/__init__.py +27 -0
- dolphin/lib/ontology/datasource/datasource.py +66 -0
- dolphin/lib/ontology/datasource/oracle_datasource.py +338 -0
- dolphin/lib/ontology/datasource/sql.py +845 -0
- dolphin/lib/ontology/mapping.py +177 -0
- dolphin/lib/ontology/ontology.py +733 -0
- dolphin/lib/ontology/ontology_context.py +16 -0
- dolphin/lib/ontology/ontology_manager.py +107 -0
- dolphin/lib/skill_results/__init__.py +31 -0
- dolphin/lib/skill_results/cache_backend.py +559 -0
- dolphin/lib/skill_results/result_processor.py +181 -0
- dolphin/lib/skill_results/result_reference.py +179 -0
- dolphin/lib/skill_results/skillkit_hook.py +324 -0
- dolphin/lib/skill_results/strategies.py +328 -0
- dolphin/lib/skill_results/strategy_registry.py +150 -0
- dolphin/lib/skillkits/__init__.py +44 -0
- dolphin/lib/skillkits/agent_skillkit.py +155 -0
- dolphin/lib/skillkits/cognitive_skillkit.py +82 -0
- dolphin/lib/skillkits/env_skillkit.py +250 -0
- dolphin/lib/skillkits/mcp_adapter.py +616 -0
- dolphin/lib/skillkits/mcp_skillkit.py +771 -0
- dolphin/lib/skillkits/memory_skillkit.py +650 -0
- dolphin/lib/skillkits/noop_skillkit.py +31 -0
- dolphin/lib/skillkits/ontology_skillkit.py +89 -0
- dolphin/lib/skillkits/plan_act_skillkit.py +452 -0
- dolphin/lib/skillkits/resource/__init__.py +52 -0
- dolphin/lib/skillkits/resource/models/__init__.py +6 -0
- dolphin/lib/skillkits/resource/models/skill_config.py +109 -0
- dolphin/lib/skillkits/resource/models/skill_meta.py +127 -0
- dolphin/lib/skillkits/resource/resource_skillkit.py +393 -0
- dolphin/lib/skillkits/resource/skill_cache.py +215 -0
- dolphin/lib/skillkits/resource/skill_loader.py +395 -0
- dolphin/lib/skillkits/resource/skill_validator.py +406 -0
- dolphin/lib/skillkits/resource_skillkit.py +11 -0
- dolphin/lib/skillkits/search_skillkit.py +163 -0
- dolphin/lib/skillkits/sql_skillkit.py +274 -0
- dolphin/lib/skillkits/system_skillkit.py +509 -0
- dolphin/lib/skillkits/vm_skillkit.py +65 -0
- dolphin/lib/utils/__init__.py +9 -0
- dolphin/lib/utils/data_process.py +207 -0
- dolphin/lib/utils/handle_progress.py +178 -0
- dolphin/lib/utils/security.py +139 -0
- dolphin/lib/utils/text_retrieval.py +462 -0
- dolphin/lib/vm/__init__.py +11 -0
- dolphin/lib/vm/env_executor.py +895 -0
- dolphin/lib/vm/python_session_manager.py +453 -0
- dolphin/lib/vm/vm.py +610 -0
- dolphin/sdk/__init__.py +60 -0
- dolphin/sdk/agent/__init__.py +12 -0
- dolphin/sdk/agent/agent_factory.py +236 -0
- dolphin/sdk/agent/dolphin_agent.py +1106 -0
- dolphin/sdk/api/__init__.py +4 -0
- dolphin/sdk/runtime/__init__.py +8 -0
- dolphin/sdk/runtime/env.py +363 -0
- dolphin/sdk/skill/__init__.py +10 -0
- dolphin/sdk/skill/global_skills.py +706 -0
- dolphin/sdk/skill/traditional_toolkit.py +260 -0
- kweaver_dolphin-0.1.0.dist-info/METADATA +521 -0
- kweaver_dolphin-0.1.0.dist-info/RECORD +199 -0
- kweaver_dolphin-0.1.0.dist-info/WHEEL +5 -0
- kweaver_dolphin-0.1.0.dist-info/entry_points.txt +27 -0
- kweaver_dolphin-0.1.0.dist-info/licenses/LICENSE.txt +201 -0
- kweaver_dolphin-0.1.0.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,1865 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
import json
|
|
3
|
+
import re
|
|
4
|
+
import ast
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from typing import Any, Dict, List, Optional, AsyncGenerator
|
|
7
|
+
|
|
8
|
+
from dolphin.core import flags
|
|
9
|
+
from dolphin.core.common.constants import KEY_STATUS, PIN_MARKER
|
|
10
|
+
from dolphin.core.context_engineer.config.settings import BuildInBucket
|
|
11
|
+
from dolphin.core.common.exceptions import SkillException
|
|
12
|
+
from dolphin.core.utils.tools import ToolInterrupt
|
|
13
|
+
from dolphin.core.common.enums import (
|
|
14
|
+
CategoryBlock,
|
|
15
|
+
MessageRole,
|
|
16
|
+
Messages,
|
|
17
|
+
SkillInfo,
|
|
18
|
+
SkillType,
|
|
19
|
+
Status,
|
|
20
|
+
StreamItem,
|
|
21
|
+
TypeStage,
|
|
22
|
+
)
|
|
23
|
+
from dolphin.core.context.context import Context
|
|
24
|
+
from dolphin.core.logging.logger import (
|
|
25
|
+
console,
|
|
26
|
+
console_block_start,
|
|
27
|
+
console_agent_skill_enter,
|
|
28
|
+
console_agent_skill_exit,
|
|
29
|
+
console_skill_call,
|
|
30
|
+
console_skill_response,
|
|
31
|
+
get_logger,
|
|
32
|
+
)
|
|
33
|
+
from dolphin.core.trajectory.recorder import Recorder
|
|
34
|
+
from dolphin.core.skill.skillkit import Skillkit
|
|
35
|
+
from dolphin.core.skill.skill_matcher import SkillMatcher
|
|
36
|
+
from dolphin.lib.skillkits.system_skillkit import SystemFunctions
|
|
37
|
+
from dolphin.core.runtime.runtime_instance import ProgressInstance
|
|
38
|
+
from dolphin.core.llm.llm_client import LLMClient
|
|
39
|
+
from dolphin.core.common.types import SourceType
|
|
40
|
+
from dolphin.core.common.output_format import (
|
|
41
|
+
ObjectTypeOutputFormat,
|
|
42
|
+
OutputFormat,
|
|
43
|
+
OutputFormatFactory,
|
|
44
|
+
)
|
|
45
|
+
from dolphin.lib.skill_results.skillkit_hook import SkillkitHook
|
|
46
|
+
from dolphin.lib.skill_results.cache_backend import MemoryCacheBackend
|
|
47
|
+
from dolphin.lib.skill_results.strategy_registry import StrategyRegistry
|
|
48
|
+
from dolphin.lib.skill_results.strategies import (
|
|
49
|
+
DefaultAppStrategy,
|
|
50
|
+
DefaultLLMStrategy,
|
|
51
|
+
)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
logger = get_logger(__name__)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class BasicCodeBlock:
|
|
58
|
+
"""
|
|
59
|
+
Base class for all Dolphin Language code blocks.
|
|
60
|
+
|
|
61
|
+
This class provides core functionality for executing code blocks including:
|
|
62
|
+
- Block lifecycle management (initialization, execution, cleanup)
|
|
63
|
+
- Message and context management
|
|
64
|
+
- LLM interaction (llm_chat, llm_chat_stream)
|
|
65
|
+
- Variable replacement and output handling
|
|
66
|
+
- Skill execution and tool calling
|
|
67
|
+
- Trajectory recording and history management
|
|
68
|
+
|
|
69
|
+
Key Methods for Subclasses:
|
|
70
|
+
- execute(): Main entry point for block execution
|
|
71
|
+
- llm_chat_stream(): Stream LLM responses with tool call support
|
|
72
|
+
- _save_trajectory(): Save execution trajectory before bucket cleanup (used by explore blocks)
|
|
73
|
+
- _update_history_and_cleanup(): Update history variable and save trajectory (used by explore blocks)
|
|
74
|
+
|
|
75
|
+
Note: The _save_trajectory() and _update_history_and_cleanup() methods are typically called
|
|
76
|
+
by explore-type blocks (ExploreBlock, ExploreBlockV2) to maintain conversation history and
|
|
77
|
+
preserve tool call traces in the trajectory.
|
|
78
|
+
"""
|
|
79
|
+
|
|
80
|
+
def __init__(self, context: Context):
    """Initialize block state and ensure a skillkit hook is available.

    :param context: The shared execution context for this block.
    """
    self.context = context

    # Execution state, filled in while the block is parsed/executed.
    self.llm_client: Optional[LLMClient] = None
    self.category: Optional[CategoryBlock] = None
    self.params: Dict[str, Any] = {}
    self.content: Optional[str] = None
    self.name = self.__class__.__name__  # derive the block name from the class
    self.assign_type: Optional[str] = None
    self.output_var: Optional[str] = None
    self.output_format: Optional[OutputFormat] = None
    self.recorder: Optional[Recorder] = None
    # tool_choice support (auto|none|required|provider-specific)
    self.tool_choice: Optional[str] = None
    # Skill deduplication is only consulted by explore blocks; on by default.
    self.enable_skill_deduplicator: bool = True
    self.skills = None
    self.system_prompt = ""

    # Reuse the hook already attached to the context when present; otherwise
    # build one wired with the default app/llm strategies.
    if context and context.has_skillkit_hook():
        self.skillkit_hook = context.get_skillkit_hook()
    else:
        registry = StrategyRegistry()
        registry.register("default", DefaultAppStrategy(), category="app")
        registry.register("default", DefaultLLMStrategy(), category="llm")
        self.skillkit_hook = SkillkitHook(
            cache_backend=MemoryCacheBackend(),
            strategy_registry=registry,
        )

        # Publish the freshly built hook back onto the context so other
        # components (e.g. context retention strategies) share the same one.
        if context:
            context.set_skillkit_hook(self.skillkit_hook)
118
|
+
def validate(self, content):
    """Verify the correctness of the given block content.

    :param content: The content to be verified
    :return: Boolean value indicating whether the content is valid
    """
    # Base implementation is a no-op; subclasses override with real checks.
|
125
|
+
async def execute(
    self, content, category: CategoryBlock, replace_variables=True
) -> AsyncGenerator[Dict[str, Any], None]:
    """Execute the code block asynchronously.

    :param content: Block content
    :param category: Block category
    :param replace_variables: Whether to replace variables
    :yields: Response stream as a dictionary
    """
    ctx = self.context
    if ctx:
        # Register this block with the runtime graph before anything else.
        graph = getattr(ctx, "runtime_graph", None)
        if graph:
            graph.set_block(self)

        # Reset context state for the new block execution. This unified
        # method handles the trajectory stage baseline, the message mirror
        # reset, and transient bucket cleanup (SCRATCHPAD, SYSTEM, QUERY).
        ctx.reset_for_block()

    self.parse_block_content(content, category, replace_variables)
    # Yield once so this method is a well-formed async generator.
    yield {}
|
|
155
|
+
def get_cur_progress(self):
    """Return the progress object tracked by the active recorder.

    :raises AssertionError: if no recorder has been attached yet.
    """
    recorder = self.recorder
    assert recorder is not None, "recorder is None"
    return recorder.getProgress()
|
|
159
|
+
def find_matching_paren(self, s: str, start: int) -> int:
    """Return the index of the ')' matching the '(' at position ``start``.

    Parentheses inside string literals are ignored. The scanner tracks
    single quotes, double quotes, triple quotes, backslash escapes, and
    mid-word apostrophes (the ' in "don't" never opens a string).

    :param s: The text to scan.
    :param start: Index of the opening '('.
    :return: Index of the matching ')' or -1 when none is found.
    """
    depth = 1
    pos = start + 1
    single_q = False
    double_q = False
    triple_sq = False
    triple_dq = False

    def _mid_word_apostrophe(idx: int) -> bool:
        # An apostrophe between two alphanumerics is part of a word.
        if s[idx] != "'" or idx == 0 or idx + 1 >= len(s):
            return False
        return s[idx - 1].isalnum() and s[idx + 1].isalnum()

    length = len(s)
    while pos < length:
        # Triple-quote delimiters take precedence over single characters.
        if s[pos : pos + 3] == "'''" and not double_q and not triple_dq:
            triple_sq = not triple_sq
            pos += 3
            continue
        if s[pos : pos + 3] == '"""' and not single_q and not triple_sq:
            triple_dq = not triple_dq
            pos += 3
            continue

        if triple_sq or triple_dq:
            pos += 1
            continue

        ch = s[pos]

        if ch == "\\" and pos + 1 < length:
            pos += 2  # skip the escaped character
            continue

        if ch == "'" and not double_q and not _mid_word_apostrophe(pos):
            single_q = not single_q
            pos += 1
            continue
        if ch == '"' and not single_q:
            double_q = not double_q
            pos += 1
            continue

        if not (single_q or double_q or triple_sq or triple_dq):
            if ch == "(":
                depth += 1
            elif ch == ")":
                depth -= 1
                if depth == 0:
                    return pos

        pos += 1

    return -1
|
|
214
|
+
def split_parameters_smartly(self, params_str: str) -> List[str]:
    """Intelligently split a parameter string on top-level commas.

    Commas inside quotes, brackets, or braces never split. This replaces
    the repetitive parameter-splitting logic found in the Block classes.

    Bug fix: brackets and braces appearing *inside* a quoted string are now
    treated as literal text. Previously they still incremented the depth
    counters, so a value such as 'a="x[", b=1' corrupted the bracket count
    and suppressed every later split.

    Args:
        params_str: The parameter string, e.g.
            'tools=[a,b], model="gpt-4", params={"key":"value"}'

    Returns:
        The split parameters, e.g.
        ['tools=[a,b]', 'model="gpt-4"', 'params={"key":"value"}']
    """
    items: List[str] = []
    buf = ""
    bracket_depth = 0  # [ ] nesting
    brace_depth = 0    # { } nesting
    in_quotes = False
    quote_char = None

    def _mid_word_apostrophe(idx: int) -> bool:
        # The ' in "don't" is part of a word, not a string delimiter.
        if params_str[idx] != "'" or idx == 0 or idx + 1 >= len(params_str):
            return False
        return params_str[idx - 1].isalnum() and params_str[idx + 1].isalnum()

    for idx, ch in enumerate(params_str):
        if ch in ('"', "'") and not (ch == "'" and _mid_word_apostrophe(idx)):
            if not in_quotes:
                in_quotes = True
                quote_char = ch
            elif ch == quote_char:
                in_quotes = False
                quote_char = None
            buf += ch
        elif in_quotes:
            # Inside a quoted string everything is literal text.
            buf += ch
        elif ch == "[":
            bracket_depth += 1
            buf += ch
        elif ch == "]":
            bracket_depth -= 1
            buf += ch
        elif ch == "{":
            brace_depth += 1
            buf += ch
        elif ch == "}":
            brace_depth -= 1
            buf += ch
        elif ch == "," and bracket_depth == 0 and brace_depth == 0:
            items.append(buf.strip())
            buf = ""
        else:
            buf += ch

    if buf.strip():
        items.append(buf.strip())

    return items
|
|
270
|
+
def parse_tools_parameter(self, value: str) -> List[str]:
    """Parse a tools parameter into a list of tool names.

    Accepts array syntax with or without quotes ('["a", "b"]' / '[a, b]')
    as well as bare comma-separated names ('a' / 'a, b'). This unified
    parser avoids the eval()-based "name 'execPython' is not defined"
    failure mode seen with inconsistent per-Block handling.

    Args:
        value: The raw tools string.

    Returns:
        The parsed tool names.

    Raises:
        SyntaxError: when array brackets or quotes are unbalanced.

    Examples:
        >>> parser.parse_tools_parameter('["execBash", "execPython"]')
        ['execBash', 'execPython']
        >>> parser.parse_tools_parameter('[execBash, execPython]')
        ['execBash', 'execPython']
    """
    if not value or not value.strip():
        return []

    value = value.strip()

    # A '[' without a closing ']' is a syntax error in array form.
    if value.startswith("["):
        if not value.endswith("]"):
            raise SyntaxError(f"Unmatched brackets in tools parameter: {value}")

    if value.startswith("[") and value.endswith("]"):
        inner = value[1:-1].strip()
        if not inner:
            return []

        # Quotes inside the array must pair up.
        if inner.count("'") % 2 != 0:
            raise SyntaxError(
                f"Unmatched single quotes in tools parameter: {value}"
            )
        if inner.count('"') % 2 != 0:
            raise SyntaxError(
                f"Unmatched double quotes in tools parameter: {value}"
            )

        # Small state machine: commas inside quotes do not separate names.
        names: List[str] = []
        pending = ""
        quoting = False
        opener = None

        for ch in inner:
            if ch in ('"', "'"):
                if not quoting:
                    quoting = True
                    opener = ch
                elif ch == opener:
                    quoting = False
                    opener = None
                else:
                    # A different quote character inside a string is literal.
                    pending += ch
            elif ch == "," and not quoting:
                name = pending.strip().strip("\"'")
                if name:
                    names.append(name)
                pending = ""
            else:
                pending += ch

        # Flush the final entry.
        name = pending.strip().strip("\"'")
        if name:
            names.append(name)

        return names

    # Bare name, or names separated by commas, outside array syntax.
    return [tool.strip().strip("\"'") for tool in value.split(",") if tool.strip()]
|
|
362
|
+
def parse_parameter_value(self, key: str, value: str, expected_type: Optional[str] = None) -> Any:
    """Parse a single parameter value with per-key special handling.

    Unifies the duplicated value-handling logic of the Block classes and
    also supports ToolBlock's typed parameters.

    Cleanup: the original ``history`` check compared ``lower()`` against
    both "true"/"false" and "True"/"False" — the capitalized comparisons
    were dead code; a bare ``except:`` and a ``type(x) != str`` check are
    replaced with their idiomatic equivalents. Behavior is unchanged.

    Args:
        key: Parameter name.
        value: Raw parameter value string.
        expected_type: Optional expected type name, e.g. "string",
            "integer", "number", "boolean".

    Returns:
        The parsed value.

    Raises:
        SyntaxError: when ``history`` is not a boolean literal.
    """
    original_value = value.strip()

    # Track whether the value was explicitly quoted (forces string type).
    is_explicit_string = False
    if (original_value.startswith('"') and original_value.endswith('"')) or (
        original_value.startswith("'") and original_value.endswith("'")
    ):
        original_value = original_value[1:-1]  # drop the surrounding quotes
        is_explicit_string = True

    # Variable reference (leading $): resolve through the context.
    if original_value.startswith("$"):
        original_value = self.context.get_variable_type(original_value)

    # Handle special keys first so generic JSON parsing cannot interfere.
    if key == "history":
        if not isinstance(original_value, str) or original_value.lower() not in (
            "true",
            "false",
        ):
            raise SyntaxError(
                f"history must be a boolean value, but got {original_value}"
            )

        return original_value.lower() == "true"
    elif key == "tools":
        assert (
            isinstance(original_value, str) and original_value
        ), "tools must be a string"

        return self.parse_tools_parameter(original_value)
    elif key == "output":
        # Output-format parameter gets a tagged wrapper.
        return {"type": "output_format", "value": original_value}
    elif key in ["model", "system_prompt", "ttc_mode", "tool_choice", "mode"]:
        return original_value

    # $-resolution may have produced a non-string; pass it through as-is.
    if not isinstance(original_value, str):
        return original_value

    # Explicitly quoted values stay strings — no type inference.
    if is_explicit_string:
        return original_value

    # Dict or JSON-array literals.
    if original_value.startswith("{") or original_value.startswith("["):
        try:
            return ast.literal_eval(original_value)
        except Exception:
            try:
                return json.loads(original_value)
            except json.JSONDecodeError:
                return original_value

    # Convert according to expected_type when one is provided.
    if expected_type:
        # "string": keep as-is, no automatic inference.
        if expected_type in ["string", "str"]:
            return original_value
        elif expected_type in ["integer", "int"] and original_value.isdigit():
            return int(original_value)
        elif expected_type in ["number", "float"]:
            try:
                return float(original_value)
            except ValueError:
                return original_value
        elif expected_type in ["boolean", "bool"]:
            if original_value.lower() == "true":
                return True
            elif original_value.lower() == "false":
                return False
            return original_value
        # Unknown expected types pass through untouched.
        else:
            return original_value

    # No expected_type: legacy automatic inference. Note this can cause
    # type mismatches when the caller actually wanted a string.
    if original_value.isdigit():
        return int(original_value)
    elif (
        original_value.replace(".", "", 1).isdigit()
        and original_value.count(".") == 1
    ):
        return float(original_value)

    if original_value.lower() == "true":
        return True
    elif original_value.lower() == "false":
        return False

    return original_value
|
|
483
|
+
def parse_parameters_from_string(self, params_str: str) -> Dict[str, Any]:
    """Parse a parameter string into a parameter dict.

    Unifies the parameter-parsing logic of all Block classes, replacing
    their individual implementations. The only code change versus the
    original is replacing a bare ``except:`` with ``except Exception:``.

    Args:
        params_str: The text inside the parentheses, e.g.
            'tools=[a,b], model="gpt-4"'

    Returns:
        The parsed parameter dict.
    """
    params: Dict[str, Any] = {}

    if not params_str.strip():
        return params

    # Replace inline JSON objects with placeholders first so that commas
    # and '=' inside them cannot confuse the splitter.
    # NOTE(review): the non-greedy {.*?} pattern stops at the first '}',
    # so nested JSON objects are not fully captured — pre-existing
    # limitation kept for behavior compatibility.
    json_placeholders = {}
    placeholder_count = 0

    def replace_json(match):
        nonlocal placeholder_count
        key = match.group(1).strip()
        json_str = match.group(2).strip()
        placeholder = f"__JSON_PLACEHOLDER_{placeholder_count}__"
        placeholder_count += 1
        json_placeholders[key] = json_str
        return f"{key}={placeholder}"

    json_pattern = re.compile(r"(\w+)\s*=\s*({.*?})")
    params_str = json_pattern.sub(replace_json, params_str)

    # Split on top-level commas only.
    items = self.split_parameters_smartly(params_str)

    for item in items:
        # Entries without '=' are not key/value pairs; skip them.
        if "=" not in item:
            continue

        key, value = item.split("=", 1)
        key = key.strip()
        value = value.strip()

        if key in json_placeholders:
            # Restore the JSON payload hidden behind the placeholder.
            try:
                value = json.loads(json_placeholders[key])
            except Exception:
                # Malformed JSON: fall back to the raw string.
                value = json_placeholders[key]
        else:
            value = self.parse_parameter_value(key, value)

        params[key] = value

    return params
|
|
540
|
+
def should_quote_variable_value(self, value: str) -> bool:
    """Decide whether a variable value must be wrapped in quotes.

    Values containing characters that would confuse parameter parsing
    (commas, parentheses, brackets, braces, '=') need quote protection.

    Args:
        value: The variable value to inspect.

    Returns:
        True when the value needs surrounding quotes.
    """
    if not isinstance(value, str):
        return False

    # Any of these characters can be misread as parameter syntax.
    risky = (",", "(", ")", "[", "]", "{", "}", "=")
    return any(marker in value for marker in risky)
|
|
558
|
+
def parse_block_content(self, content: str, category=None, replace_variables=True):
    """
    Unified block-content parser.

    Two syntaxes are supported:
      1. Regular block: "/block_prefix/(params) main_content -> output_var"
      2. Tool block:    "@tool_name(args) -> output_var"

    Side effects: populates self.content / self.params / self.assign_type /
    self.output_var plus the option attributes (history, model, skills,
    output_format, ...), and constructs self.progress and self.recorder.

    Args:
        content: Full text of the block.
        category: Block category; may be None only for the tool form.
        replace_variables: When True (and a context is attached), expand
            variable references in the content before structural parsing.

    Raises:
        ValueError: If the content matches neither syntax, the parameter
            parentheses are unbalanced, or category is missing for the
            non-tool form.
    """
    content = content.strip()
    self.category = category

    # Tool form is recognized by a leading "@".
    if content.startswith("@"):
        self._parse_tool_format(content)

        self.progress = ProgressInstance(context=self.context)

        # Ensure progress is registered to runtime_graph for tool format
        if (
            self.context
            and hasattr(self.context, "runtime_graph")
            and self.context.runtime_graph
        ):
            # Ensure block is set before setting progress
            if self.context.runtime_graph.cur_block is None:
                self.context.runtime_graph.set_block(self)
            self.context.runtime_graph.set_progress(self.progress)

        self.recorder = Recorder(
            context=self.context,
            progress=self.progress,
            assign_type=self.assign_type,
            output_var=self.output_var,
        )
        return

    # Expand variables before structural parsing (regular blocks only).
    if self.context and replace_variables:
        content = self._variable_replace(content)

    # The regular block form requires an explicit category.
    if category is None:
        raise ValueError("category is required for non-tool format")

    # Strip the "/<category>/" block prefix if present.
    prefix = "/" + category.value + "/"
    if content.startswith(prefix):
        content = content[len(prefix) :].strip()

    # Parse the optional "(key=value, ...)" parameter list.
    params_dict = {}
    if content.startswith("("):
        params_end = self.find_matching_paren(content, 0)
        if params_end == -1:
            raise ValueError(f"Unmatched parentheses in: {content}")

        params_str = content[1:params_end]
        params_dict = self.parse_parameters_from_string(params_str)
        content = content[params_end + 1 :].strip()

    # Split "main_content <assign> output_var".
    # Order matters: try ">>" before "->" so that "->>" is not half-matched
    # as "->".
    pattern = re.compile(r"(.*?)\s*(>>|->)\s*([\w\u4e00-\u9fff]+)$", re.DOTALL)
    match = pattern.match(content)
    if not match:
        raise ValueError(f"Invalid block format: {content}")

    main_content = match.group(1).strip()
    assign_type = match.group(2)
    output_var = match.group(3)

    self.content = main_content
    self.params = params_dict
    self.assign_type = assign_type
    self.output_var = output_var

    # history defaults to disabled unless explicitly set.
    if "history" not in self.params:
        self.params["history"] = False

    # system_prompt may itself contain variable references — expand now.
    self.system_prompt = self.params.get("system_prompt", "")
    if self.system_prompt:
        self.system_prompt = self._variable_replace(self.system_prompt)

    self.history = params_dict.get("history", "")
    self.model = params_dict.get("model", "")
    # tool_choice (optional)
    self.tool_choice = params_dict.get("tool_choice", None)

    # "skills" with "tools" accepted as a backward-compatible alias.
    self.skills = params_dict.get("skills", None)
    if self.skills is None:
        self.skills = params_dict.get("tools", None)

    self._validate_skills()

    self.ttc_mode = params_dict.get("ttc_mode", None)
    self.no_cache = params_dict.get("no_cache", False)
    self.flags = params_dict.get("flags", "")
    # Whether skill-call deduplication is enabled (only explore blocks
    # actually consume this flag).
    self.enable_skill_deduplicator = params_dict.get(
        "enable_skill_deduplicator", True
    )

    # Optional structured-output format: output={"type": "output_format", "value": ...}.
    output_param = params_dict.get("output", None)
    if (
        output_param
        and isinstance(output_param, dict)
        and output_param.get("type") == "output_format"
    ):
        try:
            global_types = self.context.get_global_types()
            self.output_format = OutputFormatFactory.parseFromString(
                output_param["value"], global_types
            )
        except ValueError as e:
            # A malformed format degrades to plain output, not a hard error.
            console(
                f"Warning: Failed to parse output format '{output_param['value']}': {e}"
            )
            self.output_format = None
    else:
        self.output_format = None

    self.progress = ProgressInstance(context=self.context, flags=self.flags)

    # Check if runtime_graph exists before using it (for testing compatibility)
    if (
        self.context
        and hasattr(self.context, "runtime_graph")
        and self.context.runtime_graph
    ):
        # Ensure block is set before setting progress
        if self.context.runtime_graph.cur_block is None:
            self.context.runtime_graph.set_block(self)
        self.context.runtime_graph.set_progress(self.progress)

    self.recorder = Recorder(
        context=self.context,
        progress=self.progress,
        assign_type=self.assign_type,
        output_var=self.output_var,
    )
|
|
707
|
+
|
|
708
|
+
def _validate_skills(self):
|
|
709
|
+
"""Validate that all requested skills/patterns match at least one available skill."""
|
|
710
|
+
if self.skills is not None and self.context:
|
|
711
|
+
current_skillkit = self.context.get_skillkit()
|
|
712
|
+
if current_skillkit:
|
|
713
|
+
available_skills = current_skillkit.getSkills()
|
|
714
|
+
owner_names = SkillMatcher.get_owner_skillkits(available_skills)
|
|
715
|
+
|
|
716
|
+
for pattern in self.skills:
|
|
717
|
+
if not any(
|
|
718
|
+
SkillMatcher.match_skill(
|
|
719
|
+
skill, pattern, owner_names=owner_names
|
|
720
|
+
)
|
|
721
|
+
for skill in available_skills
|
|
722
|
+
):
|
|
723
|
+
# Build a user-friendly error message
|
|
724
|
+
available_skill_names = [
|
|
725
|
+
s.get_function_name() for s in available_skills
|
|
726
|
+
]
|
|
727
|
+
error_lines = [
|
|
728
|
+
f"Skill pattern '{pattern}' did not match any available skills.",
|
|
729
|
+
"",
|
|
730
|
+
f"Available skills ({len(available_skill_names)}):",
|
|
731
|
+
]
|
|
732
|
+
if available_skill_names:
|
|
733
|
+
# Show up to 20 skills to avoid overly long error messages
|
|
734
|
+
displayed_skills = available_skill_names[:20]
|
|
735
|
+
for s in displayed_skills:
|
|
736
|
+
error_lines.append(f" - {s}")
|
|
737
|
+
if len(available_skill_names) > 20:
|
|
738
|
+
error_lines.append(
|
|
739
|
+
f" ... and {len(available_skill_names) - 20} more"
|
|
740
|
+
)
|
|
741
|
+
else:
|
|
742
|
+
error_lines.append(" (none)")
|
|
743
|
+
|
|
744
|
+
error_lines.extend(
|
|
745
|
+
[
|
|
746
|
+
"",
|
|
747
|
+
"Possible fixes:",
|
|
748
|
+
" 1. Check if the skill name/pattern is spelled correctly",
|
|
749
|
+
" 2. Ensure the skill is registered in your skillkit configuration",
|
|
750
|
+
" 3. Verify that the required skillkit module is loaded",
|
|
751
|
+
" 4. If using wildcards, ensure the pattern matches at least one skill (e.g. '*_resource*')",
|
|
752
|
+
" 5. If using skillkit namespace, use '<skillkit>.<pattern>' (e.g. 'resource_skillkit.*')",
|
|
753
|
+
]
|
|
754
|
+
)
|
|
755
|
+
|
|
756
|
+
raise SkillException(
|
|
757
|
+
code=f"SKILL_NOT_FOUND: {pattern}",
|
|
758
|
+
message="\n".join(error_lines),
|
|
759
|
+
)
|
|
760
|
+
|
|
761
|
+
def _parse_tool_format(self, content: str):
    """
    Parse the tool-call form: ``@tool_name(args) -> output_var``.

    Side effects: sets self.content (the tool name), self.params,
    self.assign_type, self.output_var, and self.category (TOOL).

    Args:
        content: The tool-call text, starting with '@'.

    Raises:
        ValueError: When the content does not match the tool-call syntax.
    """
    # The regex allows CJK characters in tool and variable names.
    # NOTE: ">>" is tried before "->" so "->>" cannot be half-matched as
    # "->" — same precedence rule documented in parse_block_content.
    tool_pattern = re.compile(
        r"@([\w\u4e00-\u9fff_-]+)\((.*?)\)\s*(>>|->)\s*([\w\u4e00-\u9fff]+)",
        re.DOTALL,
    )
    match = tool_pattern.match(content.strip())

    if not match:
        raise ValueError(f"Invalid tool call format[{content}]")

    tool_name = match.group(1)
    args_str = match.group(2).strip()
    assign_type = match.group(3)
    output_var = match.group(4)

    # Parse the arguments; pass tool_name so the tool's schema can drive
    # parameter typing.
    args_dict = self.parse_tool_parameters_from_string(args_str, tool_name)

    self.content = tool_name  # for tool blocks, content stores the tool name
    self.params = args_dict
    self.assign_type = assign_type
    self.output_var = output_var
    self.category = CategoryBlock.TOOL  # tool blocks always use category TOOL
|
|
794
|
+
|
|
795
|
+
def parse_tool_parameters_from_string(self, params_str: str, tool_name: Optional[str] = None) -> Dict[str, Any]:
    """
    Parse a ToolBlock argument string into a parameter dict.

    Supports both positional and named arguments:
      - positional: ``tool($arg1, $arg2)`` → keys ``arg_0``, ``arg_1``, ...
      - named:      ``tool(arg1=$value1, arg2=$value2)``

    Args:
        params_str: Raw argument string (the text inside the parentheses).
        tool_name: Optional tool name; when given, the tool's OpenAI schema
            is consulted so named arguments are coerced to their declared
            JSON-schema types.

    Returns:
        Dict mapping parameter name to parsed value; empty for a blank
        argument string.
    """
    params = {}

    if not params_str.strip():
        return params

    # Fetch the tool's schema (best-effort) so named parameters can be
    # coerced to their declared types.
    tool_schema = None
    if tool_name and hasattr(self, "context") and self.context:
        try:
            skillkit = self.get_skillkit()
            if skillkit:
                skill = skillkit.getSkill(tool_name)
                if skill:
                    tool_schema = skill.get_openai_tool_schema()
        except Exception:
            # Schema lookup failures are non-fatal: fall back to default
            # (untyped) parsing. Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit — narrowed to Exception.
            pass

    # Map parameter name -> declared JSON-schema type (e.g. "string").
    param_types = {}
    if tool_schema and "function" in tool_schema:
        function_def = tool_schema["function"]
        if "parameters" in function_def and "properties" in function_def["parameters"]:
            properties = function_def["parameters"]["properties"]
            for param_name, param_info in properties.items():
                param_types[param_name] = param_info.get("type", "string")

    # Collapse multi-line argument lists onto a single line.
    params_str = re.sub(r"\s*\n\s*", " ", params_str)

    # Quote/bracket-aware splitting on top-level commas (not a plain regex).
    param_items = self.split_parameters_smartly(params_str)

    positional_index = 0

    for item in param_items:
        item = item.strip()
        if not item:
            continue

        if "=" in item:
            # Named parameter: "key=value".
            key, value = item.split("=", 1)
            key = key.strip()
            value = value.strip()
            # Use the schema-declared type, when known, to guide parsing.
            expected_type = param_types.get(key)
            params[key] = self.parse_parameter_value(key, value, expected_type)
        else:
            # Positional parameter — keyed by its index as "arg_<i>".
            value = item.strip()
            key = f"arg_{positional_index}"
            params[key] = self.parse_parameter_value(key, value)
            positional_index += 1

    return params
|
|
867
|
+
|
|
868
|
+
def get_parameter_with_default(
    self, params: Dict[str, Any], key: str, default: Any
) -> Any:
    """Look up *key* in *params*, falling back to *default* when absent.

    Args:
        params: Parameter dictionary to read from.
        key: Parameter name.
        default: Value returned when the key is missing.

    Returns:
        The stored value, or *default* if the key is not present.
    """
    if key in params:
        return params[key]
    return default
|
|
883
|
+
|
|
884
|
+
def _is_history_enabled(self) -> bool:
|
|
885
|
+
"""检查 history 参数是否开启."""
|
|
886
|
+
if isinstance(self.history, bool):
|
|
887
|
+
return self.history
|
|
888
|
+
if isinstance(self.history, str):
|
|
889
|
+
return self.history.lower() == "true"
|
|
890
|
+
return False
|
|
891
|
+
|
|
892
|
+
def _get_history_messages(self) -> Optional[Messages]:
    """Convert the context's "history" variable into a Messages object.

    Accepts either an existing Messages instance or a list of
    {"role": ..., "content": ...} dicts. Entries without content, and
    non-dict list items, are skipped. Returns None when nothing usable
    remains.
    """
    history_vars = self.context.get_var_value("history")
    if not history_vars:
        return None

    # Already a Messages container: pass through (None when empty).
    if isinstance(history_vars, Messages):
        return None if history_vars.empty() else history_vars

    result = Messages()
    if isinstance(history_vars, list):
        # Map known roles; anything unrecognized is treated as a user turn.
        role_map = {
            "assistant": MessageRole.ASSISTANT,
            "system": MessageRole.SYSTEM,
        }
        for entry in history_vars:
            if not isinstance(entry, dict):
                continue
            text = entry.get("content", "")
            if not text:
                continue
            role_key = str(entry.get("role", "user")).lower()
            result.add_message(text, role_map.get(role_key, MessageRole.USER))

    return None if result.empty() else result
|
|
917
|
+
|
|
918
|
+
def _add_history_to_context_manager(self, bucket_prefix: str = "llm_history"):
    """
    Add historical messages to context_manager if history parameter is enabled.

    Injection happens at most once per context lifecycle: a
    ``_history_injected`` flag on the context and a non-empty
    ``conversation_history`` bucket both suppress re-injection.

    NOTE(review): ``bucket_prefix`` is currently unused — the bucket name
    is hard-coded to "conversation_history"; confirm whether the parameter
    is vestigial.
    """
    if not self._is_history_enabled():
        return

    # avoid duplicate injection within the same context lifecycle
    try:
        if getattr(self.context, "_history_injected", False):
            return
    except Exception:
        pass

    # If conversation_history already has content, treat it as pre-existing
    # history and skip injection to avoid duplicates.
    try:
        cm = self.context.context_manager
        # cm.state may not exist on all context_manager implementations.
        bucket = (
            cm.state.buckets.get("conversation_history")
            if hasattr(cm, "state")
            else None
        )
        if bucket is not None:
            # Local import; presumably avoids a circular import at module
            # load time — TODO confirm.
            from dolphin.core.common.enums import Messages as _Msgs

            if isinstance(bucket.content, _Msgs) and bucket.content.get_messages():
                return
    except Exception:
        # Best-effort check only — fall through and attempt injection.
        pass

    history_messages = self._get_history_messages()
    if not history_messages:
        return

    self.context.add_bucket(
        "conversation_history", history_messages, message_role=MessageRole.USER
    )

    # Mark as injected to prevent duplicate injection later.
    try:
        setattr(self.context, "_history_injected", True)
    except Exception:
        pass
|
|
961
|
+
|
|
962
|
+
async def llm_chat(
    self,
    lang_mode: str,
    with_skill: bool = False,
    early_stop_on_tool_call: bool = False,
):
    """
    Run one LLM chat round for this block, yielding stream-item dicts.

    Builds the message set from the context's buckets (system prompt,
    query, optional history), attaches tools when requested, streams the
    LLM response via llm_chat_stream, and finally applies the block's
    output_format (if any) to the last streamed item before recording it.

    Args:
        lang_mode: Language mode passed through to the LLM client.
        with_skill: When True, attach the block's skill schemas as tools.
        early_stop_on_tool_call: Forwarded to llm_chat_stream; stop
            streaming once a complete tool call is detected.

    Yields:
        Dicts produced by StreamItem.to_dict() (and, in one branch, the
        raw tool_args of the final item).
    """
    assert self.recorder, "recorder is None"
    messages = Messages()

    normalized_history = self.context.get_history_messages()

    if self.system_prompt:
        self.context.add_bucket(
            BuildInBucket.SYSTEM.value,
            self.system_prompt,
        )
    if self.content:
        self.context.add_bucket(
            BuildInBucket.QUERY.value,
            self.content,
        )
    if self.history and normalized_history:
        # Use the dedicated helper so the history bucket stays in sync with
        # the history snapshot held in the variable pool.
        self.context.set_history_bucket(normalized_history)

    messages = self.context.context_manager.to_dph_messages()

    # If an output format is required, append its constraint to the messages.
    if self.output_format:
        self.output_format.addFormatConstraintToMessages(messages)

    # Assemble the LLM call parameters.
    llm_params = {
        "messages": messages,
        "model": self.model,
        "ttc_mode": self.ttc_mode,
        "output_var": self.output_var,
        "lang_mode": lang_mode,
        "no_cache": self.no_cache,
    }

    if with_skill:
        llm_params["tools"] = self.get_skillkit().getSkillsSchema()
        if self.tool_choice:
            llm_params["tool_choice"] = self.tool_choice
    elif self.output_format and isinstance(
        self.output_format, ObjectTypeOutputFormat
    ):
        # For ObjectType formats, expose the format as function-call tools.
        try:
            tools = self.output_format.generateFunctionCallTools()
            llm_params["tools"] = tools
            if self.tool_choice:
                llm_params["tool_choice"] = self.tool_choice
        except Exception as e:
            # Tool generation failure degrades to a plain chat call.
            console(f"Warning: Failed to generate function call tools: {e}")

    # Create stream renderer for live markdown (CLI layer)
    renderer = None
    on_chunk = None
    if self.context.is_cli_mode():
        try:
            from dolphin.cli.ui.stream_renderer import LiveStreamRenderer

            renderer = LiveStreamRenderer(verbose=self.context.is_verbose())
            renderer.start()
            on_chunk = renderer.on_chunk
        except ImportError:
            # CLI UI not installed — fall back to plain console output.
            pass

    last_stream_item: Optional[StreamItem] = None
    assert self.content, "content is None"
    try:
        async for stream_item in self.llm_chat_stream(
            llm_params,
            self.recorder,
            self.content,
            early_stop_on_tool_call,
            on_stream_chunk=on_chunk,
        ):
            last_stream_item = stream_item
            yield stream_item.to_dict()
    finally:
        # Always stop the live renderer, even if the stream raised.
        if renderer:
            renderer.stop()

    assert last_stream_item, f"failed read from llm[{llm_params}]"

    # If an output format is required, parse the final answer and store the
    # parsed object into the output variable.
    final_answer = last_stream_item.answer
    if self.output_format:
        if final_answer:
            try:
                parsed_result = self.output_format.parseResponse(final_answer)
                console(
                    f"\n[Parsed Result]: {parsed_result}",
                    verbose=self.context.is_verbose(),
                )

                # Keep the original answer/think, but make the parsed object
                # the value bound to the output variable.
                last_stream_item.set_output_var_value(parsed_result)

                self.recorder.update(
                    item=last_stream_item,
                    is_completed=True,
                    source_type=SourceType.LLM,
                )
                yield last_stream_item.to_dict()

            except Exception as e:
                # Parsing failed: keep the raw answer and do not interrupt
                # the flow.
                console(f"\nWarning: Failed to parse output format: {e}")
                self.recorder.update(
                    item=last_stream_item,
                    is_completed=True,
                    source_type=SourceType.LLM,
                )
                yield last_stream_item.to_dict()
        elif (
            isinstance(self.output_format, ObjectTypeOutputFormat)
            and last_stream_item.has_complete_tool_call()
        ):
            # No textual answer, but the model emitted a complete tool
            # call — use its arguments as the structured output.
            last_stream_item.set_output_var_value(last_stream_item.tool_args)
            self.recorder.update(
                item=last_stream_item.tool_args,
                is_completed=True,
                source_type=SourceType.LLM,
            )
            yield last_stream_item.tool_args
    else:
        self.recorder.update(
            item=last_stream_item, is_completed=True, source_type=SourceType.LLM
        )
        yield last_stream_item.to_dict()

    console("\n", verbose=self.context.is_verbose())
|
|
1098
|
+
|
|
1099
|
+
def block_start_log(self, block_name: str, content: Optional[str] = None):
    """Print the standard 'block started' console banner.

    Args:
        block_name: Display name of the block being entered.
        content: Optional extra detail shown alongside the banner.
    """
    assert self.output_var
    verbose_flag = self.context.verbose
    console_block_start(block_name, self.output_var, content, verbose=verbose_flag)
|
|
1104
|
+
|
|
1105
|
+
def record_llm_response_to_trajectory(
    self, last_item: Optional[Dict[str, Any]]
) -> None:
    """
    Record an LLM response in trajectory tracking.

    Extracts the answer text from the final item yielded by llm_chat and
    appends it to the context as an assistant message. Items without an
    "answer" key (or a falsy last_item) are ignored.

    Args:
        last_item: The last item yielded from llm_chat.
    """
    if not last_item or "answer" not in last_item:
        return

    payload = last_item["answer"]
    if isinstance(payload, dict) and "answer" in payload:
        answer_text = payload["answer"]
    elif isinstance(payload, str):
        answer_text = payload
    else:
        # Fall back to the string form of whatever was returned.
        answer_text = str(payload)
    self.context.add_assistant_message(answer_text)
|
|
1124
|
+
|
|
1125
|
+
def get_skillkit(self):
    """Return the skillkit available to this code block.

    Delegates entirely to Context.get_skillkit, filtered by the block's
    requested skills list.
    """
    requested = self.skills
    return self.context.get_skillkit(requested)
|
|
1128
|
+
|
|
1129
|
+
async def skill_run(
    self,
    source_type: SourceType,
    skill_name: str,
    skill_params_json: Dict[str, Any] = {},
    props=None,
):
    """
    Execute a skill (tool or agent-as-skill) and stream its results.

    Resolves the skill by name (falling back to SystemFunctions), records
    a SKILL progress stage, runs it via Skillkit.arun, pipes each result
    through the skillkit hook (cache + app-reply transformation), and
    yields the transformed results. Errors are reported as user-facing
    messages rather than raised (except ToolInterrupt, which propagates).

    NOTE(review): ``skill_params_json`` uses a mutable default argument
    ({}); it is not mutated here, but the default should be None-guarded.

    Args:
        source_type: Origin of the call, used when recording results.
        skill_name: Name of the skill to execute.
        skill_params_json: Arguments passed to the skill.
        props: Optional extra properties; the context is injected as "gvp".

    Yields:
        Result dicts from the skill / hook pipeline, plus status messages.
    """
    # No skillkit at all: warn and bail out silently (nothing yielded).
    if self.context.is_skillkit_empty():
        self.context.warn(f"skillkit is None, skill_name[{skill_name}]")
        return

    # Resolve the skill: context first, then built-in system functions.
    skill = self.context.get_skill(skill_name)
    if not skill:
        skill = SystemFunctions.getSkill(skill_name)

    if skill is None:
        # Unknown skill: surface a user-facing message and stop.
        async for result in self.yield_message(
            f"没有{skill_name}工具可以调用!", ""
        ):
            yield result
        return

    # Create initial SKILL stage to track skill execution start
    assert self.recorder, "recorder is None"
    self.recorder.getProgress().add_stage(
        agent_name=skill_name,
        stage=TypeStage.SKILL,
        status=Status.PROCESSING,
        skill_info=SkillInfo.build(
            skill_type=SkillType.TOOL,
            skill_name=skill_name,
            skill_args=skill_params_json,
        ),
        input_content=str(skill_params_json),
        interrupted=False,
    )

    # notify app
    async for result in self.yield_message(answer="", think=""):
        yield result

    # Agent-as-skill handling: forbid self-invocation, then hand the
    # current context to the agent.
    agent_as_skill = self.context.get_agent_skill(skill)
    if agent_as_skill is not None:
        cur_agent = self.context.get_cur_agent()
        if (
            cur_agent is not None
            and agent_as_skill.get_name() == cur_agent.get_name()
        ):
            error_message = f"禁止代理 {skill_name} 调用自身为技能。"
            self.context.error(error_message)
            if self.recorder is not None:
                self.recorder.update(
                    item={"think": "", "answer": error_message},
                    source_type=source_type,
                    skill_name=skill_name,
                    skill_args=skill_params_json,
                    is_completed=True,
                    has_error=True,
                )
            async for result in self.yield_message(answer=error_message, think=""):
                yield result
            return
        self.context.delete_variable(KEY_STATUS)
        agent_as_skill.set_context(self.context)

    # Stream the execution through Skillkit.arun.
    have_answer = False
    cur_agent = self.context.get_cur_agent()

    if props is None:
        props = {}
    props.update({"gvp": self.context})
    try:
        console_skill_call(
            skill_name, skill_params_json, verbose=self.context.verbose, skill=skill
        )
        if agent_as_skill is not None:
            console_agent_skill_enter(skill_name, verbose=self.context.verbose)
        result = None
        async for result in Skillkit.arun(
            skill=skill,
            skill_params=skill_params_json if skill_params_json is not None else {},
            props=props,
        ):
            # Debug: log result type and keys
            self.context.debug(
                f"[BasicCodeBlock.skill_run] Tool {skill_name} returned result type: {type(result)}"
            )
            if isinstance(result, dict):
                if "answer" in result:
                    self.context.debug(
                        f"[BasicCodeBlock.skill_run] answer : {result['answer']}"
                    )

            # Check if this is a dynamic tool response and load tools immediately
            if (
                isinstance(result, dict)
                and "answer" in result
                and isinstance(result["answer"], dict)
                and "_dynamic_tools" in result["answer"]
            ):
                # Load dynamic tools into current skillkit
                self.context.info(
                    f"[BasicCodeBlock] Detected dynamic tool response, loading tools..."
                )
                loaded_count = self._load_dynamic_tools(result["answer"])
                self.context.info(
                    f"[BasicCodeBlock] Loaded {loaded_count} dynamic tools"
                )
            else:
                self.context.debug(
                    f"[BasicCodeBlock.skill_run] Not a dynamic tool response (result={'dict' if isinstance(result, dict) else type(result)}, has_answer={'answer' in result if isinstance(result, dict) else False})"
                )

            # After tool execution, store the result in cache
            try:
                ref = self.skillkit_hook.on_tool_after_execute(skill_name, result)
                # Remove problematic code
            except Exception as e:
                # NOTE(review): `import traceback` is unused; the exception
                # is re-raised unchanged.
                import traceback

                raise e

            # Save the Reference object as raw output
            raw_output = ref
            # Process the response data to return to frontend
            try:
                result = self.skillkit_hook.on_before_reply_app(
                    reference_id=ref.reference_id, skill=skill
                )
            except Exception as e:
                raise e

            self.recorder.update(
                item=result,
                raw_output=raw_output,
                source_type=SourceType.SKILL,
                skill_name=skill_name,
                skill_args=skill_params_json,
            )

            have_answer = True
            yield result

        # Restore the original current agent after skill execution
        if agent_as_skill is not None:
            self.context.set_cur_agent(cur_agent)

        # Final completion record for the last result of the stream.
        yield self.recorder.update(
            item=result,
            source_type=SourceType.SKILL,
            skill_name=skill_name,
            skill_args=skill_params_json,
            is_completed=True,
        )
        if agent_as_skill is not None:
            console_agent_skill_exit(skill_name, verbose=self.context.verbose)
    except ToolInterrupt as e:
        # Restore original agent even in case of interruption
        if agent_as_skill is not None:
            self.context.set_cur_agent(cur_agent)

        raise e
    except Exception as e:
        # Restore original agent even in case of exception
        if agent_as_skill is not None:
            self.context.set_cur_agent(cur_agent)

        self.context.error(
            f"error in skill_run[{skill_name}], error type: {type(e)}, error info: {str(e)}"
        )
        error_message = f"调用{skill_name}工具时发生错误。错误信息: {str(e)}"
        self.recorder.update(
            item={"think": "", "answer": error_message},
            source_type=source_type,
            is_completed=True,
            has_error=True,
        )
        async for result in self.yield_message(answer=error_message, think=""):
            yield result

    answer = self.recorder.get_answer()

    # Optimize console output:
    # If this skill is an Agent (agent_as_skill is not None), we SKIP printing the response.
    # Reason: Sub-agents stream their output to the console during execution (Live Markdown).
    # Printing the final result again is redundant duplication.
    if agent_as_skill is None:
        # Ensure we pass the full answer to the UI so JSON parsing succeeds.
        # The UI module handles visual truncation of large structures intelligently.
        console_skill_response(
            skill_name=skill_name,
            response=answer,
            max_length=1024,
            verbose=self.context.verbose,
            skill=skill,
            params=skill_params_json,
        )
    self.context.debug(
        f"call_skill function_name[{skill_name}] "
        f"tool_message[{str(skill_params_json).strip()}] "
        f"resp[{str(self.recorder.get_progress_answers())[: self.context.get_max_answer_len()]}]"
    )

    if not have_answer:
        # The skill yielded nothing usable — record an explicit error.
        self.recorder.update(
            item={
                "think": "",
                "answer": f"调用{skill_name}工具时未正确返回结果。",
            },
            source_type=source_type,
            is_completed=True,
            has_error=True,
        )
|
|
1343
|
+
|
|
1344
|
+
async def llm_chat_stream(
    self,
    llm_params: dict,
    recorder: Recorder | None,
    content: str,
    early_stop_on_tool_call: bool = False,
    on_stream_chunk=None,
    session_counter: int = 0,
):
    """
    LLM chat stream with optional early stopping on tool call detection.

    Args:
        llm_params: LLM parameters
        recorder: Recorder instance
        content: Input content
        early_stop_on_tool_call: If True, stop streaming when a complete tool call is detected
        on_stream_chunk: Optional callback for CLI rendering.
            Signature: (chunk_text: str, full_text: str, is_final: bool) -> None
            If None, uses default console() output.
        session_counter: Session-level tool call batch counter for generating stable
            fallback tool_call_ids. Passed to StreamItem.parse_from_chunk().

    Yields:
        StreamItem objects, one per received chunk.
    """
    # Store the model name in context for consistency across multiple rounds
    if "model" in llm_params and llm_params["model"]:
        self.context.set_last_model_name(llm_params["model"])

    # Record this LLM call as a PROCESSING stage (skipped without a recorder).
    # (Was written as a conditional expression used purely for its side
    # effect; a plain `if` statement is the idiomatic form.)
    if recorder:
        recorder.getProgress().add_stage(
            agent_name="main",
            stage=TypeStage.LLM,
            status=Status.PROCESSING,
            input_content=content,
            input_messages=llm_params["messages"],
        )

    assert self.llm_client, "llm_client is None"

    # Length of the answer text already rendered, so each iteration only
    # emits the newly-arrived suffix.
    cur_len = 0

    async for chunk in self.llm_client.mf_chat_stream(**llm_params):
        # Checkpoint: Check user interrupt during LLM streaming
        self.context.check_user_interrupt()

        stream_item = StreamItem()
        stream_item.parse_from_chunk(chunk, session_counter=session_counter)

        # Rendering: use callback if provided, otherwise default console output
        chunk_text = stream_item.answer[cur_len:]
        if on_stream_chunk:
            on_stream_chunk(
                chunk_text=chunk_text, full_text=stream_item.answer, is_final=False
            )
        else:
            # Default: simple console output
            console(chunk_text, verbose=self.context.is_verbose(), end="")

        cur_len = len(stream_item.answer)

        if recorder:
            recorder.update(item=stream_item, raw_output=stream_item.answer)

        yield stream_item

        # Early stop once a complete tool call is present in the stream.
        # BUGFIX: the original guarded this on two local flags
        # (tool_call_detected / complete_tool_call) that were never updated,
        # making the break unreachable; query the stream item directly.
        if early_stop_on_tool_call and stream_item.has_complete_tool_call():
            break
|
|
1418
|
+
|
|
1419
|
+
async def yield_None(self, function_name):
    """Yield a single fallback payload reporting that *function_name* is not callable."""
    payload = {
        "answer": {"answer": f"没有{function_name}工具可以调用!", "think": ""},
        "block_answer": "",
    }
    yield payload
|
|
1424
|
+
|
|
1425
|
+
async def yield_message(self, answer, think):
    """Yield one message payload wrapping *answer* and *think* in the standard shape."""
    message = {
        "answer": {"answer": answer, "think": think},
        "block_answer": "",
    }
    yield message
|
|
1427
|
+
|
|
1428
|
+
def update_recorder(
    self,
    item,
    source_type: SourceType,
    skill_name: str,
    skill_args: Dict[str, Any],
    is_completed: bool = False,
):
    """Forward *item* to the recorder, normalizing its shape per source type.

    EXPLORE items are flattened into an ``{answer, think, block_answer}`` dict
    (handling both the nested ``{"answer": {"answer": ...}}`` and the flat
    shape); SKILL items are passed through with their call metadata. Any other
    source type is ignored.
    """
    assert self.recorder, "recorder is None"

    if source_type == SourceType.SKILL:
        # Skill results carry their call metadata straight through.
        self.recorder.update(
            item=item,
            source_type=source_type,
            skill_name=skill_name,
            skill_args=skill_args,
            is_completed=is_completed,
        )
        return

    if source_type != SourceType.EXPLORE:
        # No recording path defined for other source types.
        return

    if not (isinstance(item, dict) and "answer" in item):
        # Raw payload: record it as both the answer and the block answer.
        self.recorder.update(
            item={"answer": item, "block_answer": item},
            source_type=SourceType.EXPLORE,
        )
        return

    answer_field = item["answer"]
    if isinstance(answer_field, dict) and "answer" in answer_field:
        # Nested shape: {"answer": {"answer": ..., "think": ...}, ...}
        normalized = {
            "answer": answer_field.get("answer", ""),
            "think": answer_field.get("think", ""),
            "block_answer": item.get("block_answer", ""),
        }
    else:
        # Flat shape: {"answer": ..., "think": ..., ...}
        normalized = {
            "answer": item.get("answer", ""),
            "think": item.get("think", ""),
            "block_answer": item.get("block_answer", ""),
        }
    self.recorder.update(item=normalized, source_type=SourceType.EXPLORE)
|
|
1471
|
+
|
|
1472
|
+
def _variable_replace(self, content: str) -> str:
    """Substitute recognized variable references in *content* with their values.

    Each match reported by ``context.recognize_variable`` is replaced at its
    recorded position. Values containing characters that would confuse later
    parsing are wrapped in double quotes unless already quoted.

    Args:
        content: Text possibly containing variable references.

    Returns:
        The text with every recognized reference replaced.
    """
    variable_index_list = self.context.recognize_variable(content)
    if not variable_index_list:
        return content

    # Process matches from the end of the string backwards so that earlier
    # (start, end) offsets remain valid after each substitution.
    variable_index_list.sort(key=lambda x: x[1][0], reverse=True)

    for variable_name, (start, _end) in variable_index_list:
        variable_value = self.context.get_variable_type(variable_name)
        variable_value_str = str(variable_value)

        # Quote values containing special characters to avoid parsing issues.
        if self.should_quote_variable_value(variable_value_str):
            already_quoted = (
                variable_value_str.startswith('"')
                and variable_value_str.endswith('"')
            ) or (
                variable_value_str.startswith("'")
                and variable_value_str.endswith("'")
            )
            # Do not double-quote a value that already carries quotes.
            if not already_quoted:
                variable_value_str = f'"{variable_value_str}"'

        # Replace only the single occurrence at the recorded position.
        # A global str.replace() here would defeat the back-to-front ordering
        # above and could corrupt text when one variable's value contains
        # another variable's name.
        content = content[:start] + content[start:].replace(
            variable_name, variable_value_str, 1
        )
    return content
|
|
1500
|
+
|
|
1501
|
+
def _save_trajectory(self, stage_name: str = "explore"):
    """
    Save execution trajectory to file, preserving tool calls and conversation context.

    This method should be called BEFORE bucket cleanup (removing SCRATCHPAD/QUERY)
    to ensure the complete conversation context including tool calls is preserved
    in the trajectory file.

    Called by: ExploreBlock and ExploreBlockV2 in their _update_history_and_cleanup() method

    Args:
        stage_name: The name of the stage to save (default: "explore")
            Used as stage identifier in trajectory file
    """
    # No-op when trajectory recording is not configured on the context.
    if not hasattr(self.context, "trajectory") or self.context.trajectory is None:
        return

    try:
        # At this point, context_manager still contains:
        # - SYSTEM bucket (system prompt)
        # - QUERY bucket (user question)
        # - SCRATCHPAD bucket (tool calls + tool results + assistant messages)
        skillkit = self.get_skillkit()
        tools_schema = None

        # Get tools schema based on skillkit type (two known schema accessors).
        if skillkit and not skillkit.isEmpty():
            if hasattr(skillkit, "getSkillsSchema"):
                tools_schema = skillkit.getSkillsSchema()
            elif hasattr(skillkit, "getSchemas"):
                tools_schema = skillkit.getSchemas()

        # Use current recorded stages count + 1 as stage index.
        stage_index = len(self.context.trajectory.stages) + 1

        self.context.trajectory.finalize_stage(
            stage_name=stage_name,
            stage_index=stage_index,
            context_manager=self.context.context_manager,
            tools=tools_schema,
            user_id=self.context.user_id or "",
            model=getattr(self, "model", None),
        )

        logger.debug(f"Trajectory saved for '{stage_name}' before cleanup")
    except Exception as e:
        # Trajectory persistence is best-effort; never let it break execution.
        # Fixed: the previous message referenced the stale method name
        # "_save_trajectory_before_cleanup".
        logger.warning(f"Failed to save trajectory in _save_trajectory: {e}")
|
|
1553
|
+
|
|
1554
|
+
def _update_history_and_cleanup(self):
    """
    Update history variable with current conversation turn and save trajectory.

    This method performs critical post-execution cleanup for explore-type blocks:
    1. Extracts user question from current turn (from self.content or QUERY bucket)
    2. Extracts assistant answer from recorder
    3. Appends both to the 'history' variable as a new conversation turn
    4. Saves trajectory to file (BEFORE bucket cleanup to preserve tool calls)

    Note: Bucket cleanup (SCRATCHPAD, QUERY, SYSTEM) is NOT done here.
    It's handled by Context.reset_for_block() at the START of next block execution.
    This ensures trajectory can capture complete tool call information.

    Called by: ExploreBlock and ExploreBlockV2 after their main execution completes

    Side effects:
        - Updates 'history' variable in context
        - Writes trajectory to file via _save_trajectory()
    """
    if not self.recorder:
        return

    logger.debug("Executing _update_history_and_cleanup...")

    # Extract user content from self.content or QUERY bucket
    user_content = self.content
    if not user_content and self.context.context_manager:
        bucket = self.context.context_manager.state.buckets.get(
            BuildInBucket.QUERY.value
        )
        if bucket:
            # NOTE(review): relies on a private accessor of the bucket —
            # confirm whether a public text getter exists.
            user_content = bucket._get_content_text()

    answer_content = self.recorder.get_answer()

    logger.debug(
        f"Cleanup: user_content found: {bool(user_content)}, answer_content found: {bool(answer_content)}"
    )

    # Only record if we have both user content and an answer
    if user_content and answer_content:
        history_raw = self.context.get_history_messages(normalize=False)

        # Convert to list format if needed (handle different return types)
        if history_raw is None:
            history_list = []
        elif isinstance(history_raw, Messages):
            # Convert Messages object to list of dicts
            history_list = history_raw.get_messages_as_dict()
        elif isinstance(history_raw, list):
            # Note: in this branch the returned list is mutated in place by
            # the appends below (in addition to set_variable at the end).
            history_list = history_raw
        else:
            logger.warning(
                f"Unexpected history type: {type(history_raw)}, initializing as empty list"
            )
            history_list = []

        # Collect pinned tool responses (in-order) and persist them into history.
        # Minimal rule: any scratchpad message containing PIN_MARKER is considered user-intended persistence.
        # We scan _scratchpad first; if unavailable, fall back to merged messages from context_manager.
        pinned_contents: list[str] = []
        try:
            cm = self.context.context_manager

            # Seed the dedup set with string contents already present in
            # history so pinned messages are not re-added on later turns.
            existing_contents = {
                item.get("content")
                for item in history_list
                if isinstance(item, dict) and isinstance(item.get("content"), str)
            }

            def _collect_from_messages(msgs: Messages):
                # Append PIN_MARKER-tagged tool/user contents (marker stripped)
                # to pinned_contents, skipping empties and duplicates.
                for msg in msgs.get_messages():
                    role = getattr(msg, "role", None)
                    # Tool-call mode uses MessageRole.TOOL; prompt mode tool results can be user-role text.
                    if role not in (MessageRole.TOOL, MessageRole.USER):
                        continue
                    content = getattr(msg, "content", "") or ""
                    if PIN_MARKER not in content:
                        continue
                    cleaned = content.replace(PIN_MARKER, "").strip()
                    if not cleaned or cleaned in existing_contents:
                        continue
                    pinned_contents.append(cleaned)
                    existing_contents.add(cleaned)

            scratch_bucket = (
                cm.state.buckets.get(BuildInBucket.SCRATCHPAD.value)
                if cm and hasattr(cm, "state")
                else None
            )
            scratch_content = (
                getattr(scratch_bucket, "content", None) if scratch_bucket else None
            )

            if isinstance(scratch_content, Messages):
                _collect_from_messages(scratch_content)
            elif cm:
                # Fallback: scan the merged message view of the whole context.
                merged = cm.to_dph_messages()
                if isinstance(merged, Messages):
                    _collect_from_messages(merged)
        except Exception as e:
            # Pinned-message extraction is best-effort; never block the turn.
            logger.warning(f"Failed to extract pinned messages: {e}")

        # Add User Message with timestamp
        history_list.append(
            {
                "role": MessageRole.USER.value,
                "content": user_content,
                "timestamp": datetime.now().isoformat(),
            }
        )

        # Insert pinned messages after user input, before assistant final answer.
        # Use ASSISTANT role to keep message-role ordering compatible with common chat APIs.
        for pinned in pinned_contents:
            history_list.append(
                {
                    "role": MessageRole.ASSISTANT.value,
                    "content": pinned,
                    "timestamp": datetime.now().isoformat(),
                    "metadata": {"pinned": True, "source": "tool"},
                }
            )

        # Add Assistant Message with timestamp
        history_list.append(
            {
                "role": MessageRole.ASSISTANT.value,
                "content": answer_content,
                "timestamp": datetime.now().isoformat(),
            }
        )

        self.context.set_variable("history", history_list)
        logger.debug("Cleanup: History variable updated.")

    # Save trajectory BEFORE cleaning up buckets (so tool calls are preserved)
    self._save_trajectory(stage_name="explore")
|
|
1693
|
+
|
|
1694
|
+
def _load_dynamic_tools(self, result) -> int:
    """
    Load dynamic tools into current skillkit (unified implementation for all explore modes)

    Two tool definition shapes are accepted per entry in ``_dynamic_tools``:
    - ``tool_instance``: a pre-instantiated tool object, added as-is.
    - ``api_call_strategy`` (+ ``api_url``/``original_schema``/...): an API
      tool wrapped in a DynamicAPISkillFunction.

    Args:
        result: Response result containing _dynamic_tools (dict or JSON string)

    Returns:
        int: Number of successfully loaded tools
    """
    from dolphin.core.skill.skillset import Skillset
    import json

    # Parse result if it's a string.
    if isinstance(result, str):
        try:
            result = json.loads(result)
        # Fixed: was a bare `except:` that swallowed everything, including
        # KeyboardInterrupt/SystemExit. JSONDecodeError is a ValueError.
        except ValueError:
            self.context.error(f"Failed to parse dynamic tool response: {result}")
            return 0

    if not isinstance(result, dict):
        self.context.error(f"Invalid dynamic tool response type: {type(result)}")
        return 0
    _dynamic_tools = result.get("_dynamic_tools", [])
    provider_name = result.get("provider", "unknown")
    headers = result.get("headers", {})  # extract headers

    if not _dynamic_tools:
        self.context.debug("No dynamic tools to load")
        return 0

    if headers:
        self.context.info(
            f"Loading {len(_dynamic_tools)} dynamic tools from {provider_name} with headers: {headers}..."
        )
    else:
        self.context.info(
            f"Loading {len(_dynamic_tools)} dynamic tools from {provider_name}..."
        )

    # Get current skillkit
    current_skillkit = self.context.skillkit

    # If current skillkit is not a Skillset, create a new Skillset and merge
    if not isinstance(current_skillkit, Skillset):
        self.context.debug(
            f"Current skillkit is {type(current_skillkit).__name__}, converting to Skillset"
        )
        new_skillset = Skillset()
        # Add existing tools
        for skill in current_skillkit.getSkills():
            new_skillset.addSkill(skill)
        current_skillkit = new_skillset
        self.context.set_skills(current_skillkit)

    # Add new tools
    loaded_count = 0
    for tool_def in _dynamic_tools:
        try:
            tool_name = tool_def.get("name", "unknown")
            tool_instance = None

            # Determine tool type and create the instance.
            if "tool_instance" in tool_def:
                # Type 1: pre-instantiated tool (local function wrapper or other)
                tool_instance = tool_def["tool_instance"]
                self.context.debug(f"Loading pre-instantiated tool: {tool_name}")

            elif "api_call_strategy" in tool_def:
                # Type 2: API tool - automatically create a DynamicAPISkillFunction
                from dolphin.core.skill.skill_function import (
                    DynamicAPISkillFunction,
                )

                api_url = tool_def.get("api_url")
                description = tool_def.get("description", "")
                parameters = tool_def.get("parameters", {})
                original_schema = tool_def.get("original_schema", {})
                fixed_params = tool_def.get("fixed_params", {})
                api_call_strategy = tool_def.get("api_call_strategy")

                self.context.debug(
                    f"Creating DynamicAPISkillFunction for: {tool_name}, "
                    f"api_url={api_url}, api_call_strategy={api_call_strategy}, "
                    f"fixed_params={fixed_params}, headers={headers}"
                )

                # Bind tool execution policy into the app strategy slot so the tool implementation can branch if needed.
                if api_call_strategy:
                    result_process_strategies = [
                        {"strategy": str(api_call_strategy), "category": "app"},
                        {"strategy": "default", "category": "llm"},
                    ]
                else:
                    result_process_strategies = None

                tool_instance = DynamicAPISkillFunction(
                    name=tool_name,
                    description=description,
                    parameters=parameters,
                    api_url=api_url,
                    original_schema=original_schema,
                    fixed_params=fixed_params,
                    headers=headers,
                    result_process_strategies=result_process_strategies,
                    owner_skillkit=current_skillkit,
                )

            else:
                self.context.error(
                    f"Dynamic tool '{tool_name}' must provide either 'tool_instance' or 'api_call_strategy'. "
                    f"For pre-wrapped tools, provide 'tool_instance'. "
                    f"For API tools, provide 'api_call_strategy', 'api_url', 'original_schema', and optionally 'fixed_params'."
                )
                continue

            # Add tool instance to skillkit
            current_skillkit.addSkill(tool_instance)
            loaded_count += 1

            # CRITICAL FIX: Also update self.skills if it exists (for ExploreBlock to see new tools)
            if hasattr(self, "skills"):
                if isinstance(self.skills, list):
                    if tool_name not in self.skills:
                        self.skills.append(tool_name)
                        self.context.debug(
                            f"[BasicCodeBlock] Added {tool_name} to self.skills"
                        )
                else:
                    self.context.debug(
                        "[_load_dynamic_tools] self.skills is not a list, cannot append"
                    )
            else:
                self.context.debug(
                    f"[_load_dynamic_tools] self.skills does not exist on {type(self).__name__}"
                )

            self.context.debug(f"✓ Dynamically loaded tool: {tool_name}")

        except Exception as e:
            # Per-tool failures are logged and skipped so one bad definition
            # doesn't abort the whole batch.
            tool_name = (
                tool_def.get("name", "unknown")
                if isinstance(tool_def, dict)
                else "unknown"
            )
            self.context.error(f"✗ Failed to load dynamic tool {tool_name}: {e}")
            import traceback

            self.context.debug(f"Error traceback: {traceback.format_exc()}")

    self.context.info(
        f"Successfully loaded {loaded_count}/{len(_dynamic_tools)} dynamic tools"
    )

    # CRITICAL: Recalculate all_skills to include newly loaded tools
    # Context.get_skillkit() matches tools from all_skills, must recalculate
    if hasattr(self.context, "_calc_all_skills"):
        self.context._calc_all_skills()
        self.context.debug(
            f"[_load_dynamic_tools] all_skills updated, now has {len(list(self.context.all_skills.getSkillNames()))} tools"
        )
        self.context.debug(
            "[BasicCodeBlock] Recalculated all_skills after loading dynamic tools"
        )

    # Log current available tools (for debugging)
    if self.context.is_verbose():
        all_tools = list(current_skillkit.getSkillNames())
        self.context.debug(f"Current available tools: {all_tools}")

    return loaded_count
|