kweaver-dolphin 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- DolphinLanguageSDK/__init__.py +58 -0
- dolphin/__init__.py +62 -0
- dolphin/cli/__init__.py +20 -0
- dolphin/cli/args/__init__.py +9 -0
- dolphin/cli/args/parser.py +567 -0
- dolphin/cli/builtin_agents/__init__.py +22 -0
- dolphin/cli/commands/__init__.py +4 -0
- dolphin/cli/interrupt/__init__.py +8 -0
- dolphin/cli/interrupt/handler.py +205 -0
- dolphin/cli/interrupt/keyboard.py +82 -0
- dolphin/cli/main.py +49 -0
- dolphin/cli/multimodal/__init__.py +34 -0
- dolphin/cli/multimodal/clipboard.py +327 -0
- dolphin/cli/multimodal/handler.py +249 -0
- dolphin/cli/multimodal/image_processor.py +214 -0
- dolphin/cli/multimodal/input_parser.py +149 -0
- dolphin/cli/runner/__init__.py +8 -0
- dolphin/cli/runner/runner.py +989 -0
- dolphin/cli/ui/__init__.py +10 -0
- dolphin/cli/ui/console.py +2795 -0
- dolphin/cli/ui/input.py +340 -0
- dolphin/cli/ui/layout.py +425 -0
- dolphin/cli/ui/stream_renderer.py +302 -0
- dolphin/cli/utils/__init__.py +8 -0
- dolphin/cli/utils/helpers.py +135 -0
- dolphin/cli/utils/version.py +49 -0
- dolphin/core/__init__.py +107 -0
- dolphin/core/agent/__init__.py +10 -0
- dolphin/core/agent/agent_state.py +69 -0
- dolphin/core/agent/base_agent.py +970 -0
- dolphin/core/code_block/__init__.py +0 -0
- dolphin/core/code_block/agent_init_block.py +0 -0
- dolphin/core/code_block/assign_block.py +98 -0
- dolphin/core/code_block/basic_code_block.py +1865 -0
- dolphin/core/code_block/explore_block.py +1327 -0
- dolphin/core/code_block/explore_block_v2.py +712 -0
- dolphin/core/code_block/explore_strategy.py +672 -0
- dolphin/core/code_block/judge_block.py +220 -0
- dolphin/core/code_block/prompt_block.py +32 -0
- dolphin/core/code_block/skill_call_deduplicator.py +291 -0
- dolphin/core/code_block/tool_block.py +129 -0
- dolphin/core/common/__init__.py +17 -0
- dolphin/core/common/constants.py +176 -0
- dolphin/core/common/enums.py +1173 -0
- dolphin/core/common/exceptions.py +133 -0
- dolphin/core/common/multimodal.py +539 -0
- dolphin/core/common/object_type.py +165 -0
- dolphin/core/common/output_format.py +432 -0
- dolphin/core/common/types.py +36 -0
- dolphin/core/config/__init__.py +16 -0
- dolphin/core/config/global_config.py +1289 -0
- dolphin/core/config/ontology_config.py +133 -0
- dolphin/core/context/__init__.py +12 -0
- dolphin/core/context/context.py +1580 -0
- dolphin/core/context/context_manager.py +161 -0
- dolphin/core/context/var_output.py +82 -0
- dolphin/core/context/variable_pool.py +356 -0
- dolphin/core/context_engineer/__init__.py +41 -0
- dolphin/core/context_engineer/config/__init__.py +5 -0
- dolphin/core/context_engineer/config/settings.py +402 -0
- dolphin/core/context_engineer/core/__init__.py +7 -0
- dolphin/core/context_engineer/core/budget_manager.py +327 -0
- dolphin/core/context_engineer/core/context_assembler.py +583 -0
- dolphin/core/context_engineer/core/context_manager.py +637 -0
- dolphin/core/context_engineer/core/tokenizer_service.py +260 -0
- dolphin/core/context_engineer/example/incremental_example.py +267 -0
- dolphin/core/context_engineer/example/traditional_example.py +334 -0
- dolphin/core/context_engineer/services/__init__.py +5 -0
- dolphin/core/context_engineer/services/compressor.py +399 -0
- dolphin/core/context_engineer/utils/__init__.py +6 -0
- dolphin/core/context_engineer/utils/context_utils.py +441 -0
- dolphin/core/context_engineer/utils/message_formatter.py +270 -0
- dolphin/core/context_engineer/utils/token_utils.py +139 -0
- dolphin/core/coroutine/__init__.py +15 -0
- dolphin/core/coroutine/context_snapshot.py +154 -0
- dolphin/core/coroutine/context_snapshot_profile.py +922 -0
- dolphin/core/coroutine/context_snapshot_store.py +268 -0
- dolphin/core/coroutine/execution_frame.py +145 -0
- dolphin/core/coroutine/execution_state_registry.py +161 -0
- dolphin/core/coroutine/resume_handle.py +101 -0
- dolphin/core/coroutine/step_result.py +101 -0
- dolphin/core/executor/__init__.py +18 -0
- dolphin/core/executor/debug_controller.py +630 -0
- dolphin/core/executor/dolphin_executor.py +1063 -0
- dolphin/core/executor/executor.py +624 -0
- dolphin/core/flags/__init__.py +27 -0
- dolphin/core/flags/definitions.py +49 -0
- dolphin/core/flags/manager.py +113 -0
- dolphin/core/hook/__init__.py +95 -0
- dolphin/core/hook/expression_evaluator.py +499 -0
- dolphin/core/hook/hook_dispatcher.py +380 -0
- dolphin/core/hook/hook_types.py +248 -0
- dolphin/core/hook/isolated_variable_pool.py +284 -0
- dolphin/core/interfaces.py +53 -0
- dolphin/core/llm/__init__.py +0 -0
- dolphin/core/llm/llm.py +495 -0
- dolphin/core/llm/llm_call.py +100 -0
- dolphin/core/llm/llm_client.py +1285 -0
- dolphin/core/llm/message_sanitizer.py +120 -0
- dolphin/core/logging/__init__.py +20 -0
- dolphin/core/logging/logger.py +526 -0
- dolphin/core/message/__init__.py +8 -0
- dolphin/core/message/compressor.py +749 -0
- dolphin/core/parser/__init__.py +8 -0
- dolphin/core/parser/parser.py +405 -0
- dolphin/core/runtime/__init__.py +10 -0
- dolphin/core/runtime/runtime_graph.py +926 -0
- dolphin/core/runtime/runtime_instance.py +446 -0
- dolphin/core/skill/__init__.py +14 -0
- dolphin/core/skill/context_retention.py +157 -0
- dolphin/core/skill/skill_function.py +686 -0
- dolphin/core/skill/skill_matcher.py +282 -0
- dolphin/core/skill/skillkit.py +700 -0
- dolphin/core/skill/skillset.py +72 -0
- dolphin/core/trajectory/__init__.py +10 -0
- dolphin/core/trajectory/recorder.py +189 -0
- dolphin/core/trajectory/trajectory.py +522 -0
- dolphin/core/utils/__init__.py +9 -0
- dolphin/core/utils/cache_kv.py +212 -0
- dolphin/core/utils/tools.py +340 -0
- dolphin/lib/__init__.py +93 -0
- dolphin/lib/debug/__init__.py +8 -0
- dolphin/lib/debug/visualizer.py +409 -0
- dolphin/lib/memory/__init__.py +28 -0
- dolphin/lib/memory/async_processor.py +220 -0
- dolphin/lib/memory/llm_calls.py +195 -0
- dolphin/lib/memory/manager.py +78 -0
- dolphin/lib/memory/sandbox.py +46 -0
- dolphin/lib/memory/storage.py +245 -0
- dolphin/lib/memory/utils.py +51 -0
- dolphin/lib/ontology/__init__.py +12 -0
- dolphin/lib/ontology/basic/__init__.py +0 -0
- dolphin/lib/ontology/basic/base.py +102 -0
- dolphin/lib/ontology/basic/concept.py +130 -0
- dolphin/lib/ontology/basic/object.py +11 -0
- dolphin/lib/ontology/basic/relation.py +63 -0
- dolphin/lib/ontology/datasource/__init__.py +27 -0
- dolphin/lib/ontology/datasource/datasource.py +66 -0
- dolphin/lib/ontology/datasource/oracle_datasource.py +338 -0
- dolphin/lib/ontology/datasource/sql.py +845 -0
- dolphin/lib/ontology/mapping.py +177 -0
- dolphin/lib/ontology/ontology.py +733 -0
- dolphin/lib/ontology/ontology_context.py +16 -0
- dolphin/lib/ontology/ontology_manager.py +107 -0
- dolphin/lib/skill_results/__init__.py +31 -0
- dolphin/lib/skill_results/cache_backend.py +559 -0
- dolphin/lib/skill_results/result_processor.py +181 -0
- dolphin/lib/skill_results/result_reference.py +179 -0
- dolphin/lib/skill_results/skillkit_hook.py +324 -0
- dolphin/lib/skill_results/strategies.py +328 -0
- dolphin/lib/skill_results/strategy_registry.py +150 -0
- dolphin/lib/skillkits/__init__.py +44 -0
- dolphin/lib/skillkits/agent_skillkit.py +155 -0
- dolphin/lib/skillkits/cognitive_skillkit.py +82 -0
- dolphin/lib/skillkits/env_skillkit.py +250 -0
- dolphin/lib/skillkits/mcp_adapter.py +616 -0
- dolphin/lib/skillkits/mcp_skillkit.py +771 -0
- dolphin/lib/skillkits/memory_skillkit.py +650 -0
- dolphin/lib/skillkits/noop_skillkit.py +31 -0
- dolphin/lib/skillkits/ontology_skillkit.py +89 -0
- dolphin/lib/skillkits/plan_act_skillkit.py +452 -0
- dolphin/lib/skillkits/resource/__init__.py +52 -0
- dolphin/lib/skillkits/resource/models/__init__.py +6 -0
- dolphin/lib/skillkits/resource/models/skill_config.py +109 -0
- dolphin/lib/skillkits/resource/models/skill_meta.py +127 -0
- dolphin/lib/skillkits/resource/resource_skillkit.py +393 -0
- dolphin/lib/skillkits/resource/skill_cache.py +215 -0
- dolphin/lib/skillkits/resource/skill_loader.py +395 -0
- dolphin/lib/skillkits/resource/skill_validator.py +406 -0
- dolphin/lib/skillkits/resource_skillkit.py +11 -0
- dolphin/lib/skillkits/search_skillkit.py +163 -0
- dolphin/lib/skillkits/sql_skillkit.py +274 -0
- dolphin/lib/skillkits/system_skillkit.py +509 -0
- dolphin/lib/skillkits/vm_skillkit.py +65 -0
- dolphin/lib/utils/__init__.py +9 -0
- dolphin/lib/utils/data_process.py +207 -0
- dolphin/lib/utils/handle_progress.py +178 -0
- dolphin/lib/utils/security.py +139 -0
- dolphin/lib/utils/text_retrieval.py +462 -0
- dolphin/lib/vm/__init__.py +11 -0
- dolphin/lib/vm/env_executor.py +895 -0
- dolphin/lib/vm/python_session_manager.py +453 -0
- dolphin/lib/vm/vm.py +610 -0
- dolphin/sdk/__init__.py +60 -0
- dolphin/sdk/agent/__init__.py +12 -0
- dolphin/sdk/agent/agent_factory.py +236 -0
- dolphin/sdk/agent/dolphin_agent.py +1106 -0
- dolphin/sdk/api/__init__.py +4 -0
- dolphin/sdk/runtime/__init__.py +8 -0
- dolphin/sdk/runtime/env.py +363 -0
- dolphin/sdk/skill/__init__.py +10 -0
- dolphin/sdk/skill/global_skills.py +706 -0
- dolphin/sdk/skill/traditional_toolkit.py +260 -0
- kweaver_dolphin-0.1.0.dist-info/METADATA +521 -0
- kweaver_dolphin-0.1.0.dist-info/RECORD +199 -0
- kweaver_dolphin-0.1.0.dist-info/WHEEL +5 -0
- kweaver_dolphin-0.1.0.dist-info/entry_points.txt +27 -0
- kweaver_dolphin-0.1.0.dist-info/licenses/LICENSE.txt +201 -0
- kweaver_dolphin-0.1.0.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,926 @@
|
|
|
1
|
+
from typing import TYPE_CHECKING, Dict, Any, List
|
|
2
|
+
from dolphin.core.common.enums import Status, TypeStage
|
|
3
|
+
from dolphin.core.runtime.runtime_instance import (
|
|
4
|
+
AgentInstance,
|
|
5
|
+
BlockInstance,
|
|
6
|
+
ProgressInstance,
|
|
7
|
+
StageInstance,
|
|
8
|
+
TypeRuntimeInstance,
|
|
9
|
+
)
|
|
10
|
+
from dolphin.core.logging.logger import console
|
|
11
|
+
|
|
12
|
+
if TYPE_CHECKING:
|
|
13
|
+
from dolphin.core.agent.base_agent import BaseAgent
|
|
14
|
+
from dolphin.core.code_block.basic_code_block import BasicCodeBlock
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class RuntimeGraph:
|
|
18
|
+
@staticmethod
|
|
19
|
+
def is_llm_stage(instance):
|
|
20
|
+
"""
|
|
21
|
+
Check if a runtime instance represents an LLM stage.
|
|
22
|
+
|
|
23
|
+
Args:
|
|
24
|
+
instance: Runtime instance to check
|
|
25
|
+
|
|
26
|
+
Returns:
|
|
27
|
+
bool: True if instance is an LLM stage, False otherwise
|
|
28
|
+
"""
|
|
29
|
+
from dolphin.core.runtime.runtime_instance import TypeRuntimeInstance
|
|
30
|
+
from dolphin.core.common.enums import TypeStage
|
|
31
|
+
|
|
32
|
+
if instance.type != TypeRuntimeInstance.STAGE:
|
|
33
|
+
return False
|
|
34
|
+
stage_value = getattr(instance, "stage", None)
|
|
35
|
+
# Handle both enum and string values for backward compatibility
|
|
36
|
+
if stage_value == TypeStage.LLM:
|
|
37
|
+
return True
|
|
38
|
+
if isinstance(stage_value, str) and stage_value.lower() == "llm":
|
|
39
|
+
return True
|
|
40
|
+
return False
|
|
41
|
+
|
|
42
|
+
def __init__(self):
|
|
43
|
+
self.visible_instances = []
|
|
44
|
+
|
|
45
|
+
self.cur_agent: AgentInstance | None = None
|
|
46
|
+
self.cur_block: BlockInstance | None = None
|
|
47
|
+
self.cur_progress: ProgressInstance | None = None
|
|
48
|
+
self.cur_stage: StageInstance | None = None
|
|
49
|
+
|
|
50
|
+
def set_agent(self, agent: "BaseAgent"):
|
|
51
|
+
# Check if this agent already exists to avoid duplicates
|
|
52
|
+
agent_name = agent.name if agent else "_default_agent_"
|
|
53
|
+
|
|
54
|
+
new_agent_instance = AgentInstance(name=agent_name, agent=agent)
|
|
55
|
+
|
|
56
|
+
if self.cur_stage is not None:
|
|
57
|
+
new_agent_instance.set_parent(self.cur_stage)
|
|
58
|
+
|
|
59
|
+
# Update current agent and add to instances
|
|
60
|
+
self.cur_agent = new_agent_instance
|
|
61
|
+
self.visible_instances.append(self.cur_agent)
|
|
62
|
+
|
|
63
|
+
def set_block(self, block: "BasicCodeBlock"):
|
|
64
|
+
self.cur_block = BlockInstance(name=block.name, block=block)
|
|
65
|
+
if self.cur_agent:
|
|
66
|
+
self.cur_block.set_parent(self.cur_agent)
|
|
67
|
+
self.visible_instances.append(self.cur_block)
|
|
68
|
+
|
|
69
|
+
def set_progress(self, progress: ProgressInstance):
|
|
70
|
+
assert self.cur_block is not None, "Block is not set"
|
|
71
|
+
|
|
72
|
+
self.cur_progress = progress
|
|
73
|
+
self.cur_progress.set_parent(self.cur_block)
|
|
74
|
+
self.visible_instances.append(self.cur_progress)
|
|
75
|
+
|
|
76
|
+
def set_stage(self, stage: StageInstance, pop_last_stage: bool = False):
|
|
77
|
+
assert self.cur_progress is not None, "Progress is not set"
|
|
78
|
+
|
|
79
|
+
if pop_last_stage:
|
|
80
|
+
self.visible_instances.pop()
|
|
81
|
+
|
|
82
|
+
self.cur_stage = stage
|
|
83
|
+
self.visible_instances.append(self.cur_stage)
|
|
84
|
+
|
|
85
|
+
def get_all_stages(self):
|
|
86
|
+
return [
|
|
87
|
+
i.get_traditional_dict()
|
|
88
|
+
for i in self.visible_instances
|
|
89
|
+
if i.type == TypeRuntimeInstance.STAGE
|
|
90
|
+
]
|
|
91
|
+
|
|
92
|
+
def copy(self):
|
|
93
|
+
copied = RuntimeGraph()
|
|
94
|
+
copied.visible_instances = self.visible_instances.copy()
|
|
95
|
+
# Copy current state to maintain hierarchical relationships during agent calls
|
|
96
|
+
copied.cur_agent = self.cur_agent
|
|
97
|
+
copied.cur_block = self.cur_block
|
|
98
|
+
copied.cur_progress = self.cur_progress
|
|
99
|
+
copied.cur_stage = self.cur_stage
|
|
100
|
+
return copied
|
|
101
|
+
|
|
102
|
+
    def get_instances(self):
        """Return the live list of all recorded runtime instances (not a copy)."""
        return self.visible_instances
|
|
104
|
+
|
|
105
|
+
def get_call_chain_string(self, title="Dolphin Runtime Call Chain"):
|
|
106
|
+
"""
|
|
107
|
+
Get human-readable call chain visualization as a string
|
|
108
|
+
|
|
109
|
+
Args:
|
|
110
|
+
title (str): Title to display at top of output
|
|
111
|
+
|
|
112
|
+
Returns:
|
|
113
|
+
str: Call chain visualization as string
|
|
114
|
+
"""
|
|
115
|
+
lines = []
|
|
116
|
+
lines.append(f"{'=' * 60}")
|
|
117
|
+
lines.append(f"{title:^60}")
|
|
118
|
+
lines.append(f"{'=' * 60}")
|
|
119
|
+
|
|
120
|
+
if not self.visible_instances:
|
|
121
|
+
lines.append("No runtime instances found")
|
|
122
|
+
return "\n".join(lines)
|
|
123
|
+
|
|
124
|
+
# Group instances by their hierarchical structure
|
|
125
|
+
root_instances = [
|
|
126
|
+
instance for instance in self.visible_instances if instance.parent is None
|
|
127
|
+
]
|
|
128
|
+
|
|
129
|
+
if not root_instances:
|
|
130
|
+
lines.append("No root instances found")
|
|
131
|
+
return "\n".join(lines)
|
|
132
|
+
|
|
133
|
+
for root in root_instances:
|
|
134
|
+
self._append_instance_tree(root, 0, lines)
|
|
135
|
+
|
|
136
|
+
lines.append(f"{'=' * 60}")
|
|
137
|
+
lines.append(f"Total instances: {len(self.visible_instances)}")
|
|
138
|
+
|
|
139
|
+
# Add summary statistics
|
|
140
|
+
from dolphin.core.runtime.runtime_instance import TypeRuntimeInstance
|
|
141
|
+
|
|
142
|
+
agents = sum(
|
|
143
|
+
1 for i in self.visible_instances if i.type == TypeRuntimeInstance.AGENT
|
|
144
|
+
)
|
|
145
|
+
blocks = sum(
|
|
146
|
+
1 for i in self.visible_instances if i.type == TypeRuntimeInstance.BLOCK
|
|
147
|
+
)
|
|
148
|
+
progresses = sum(
|
|
149
|
+
1 for i in self.visible_instances if i.type == TypeRuntimeInstance.PROGRESS
|
|
150
|
+
)
|
|
151
|
+
stages = sum(
|
|
152
|
+
1 for i in self.visible_instances if i.type == TypeRuntimeInstance.STAGE
|
|
153
|
+
)
|
|
154
|
+
|
|
155
|
+
lines.append(
|
|
156
|
+
f"Summary: {agents} Agents, {blocks} Blocks, {progresses} Progresses, {stages} Stages"
|
|
157
|
+
)
|
|
158
|
+
lines.append(f"{'=' * 60}")
|
|
159
|
+
|
|
160
|
+
return "\n".join(lines)
|
|
161
|
+
|
|
162
|
+
def print_call_chain(self, title="Dolphin Runtime Call Chain"):
|
|
163
|
+
"""
|
|
164
|
+
Print human-readable call chain visualization
|
|
165
|
+
|
|
166
|
+
Args:
|
|
167
|
+
title (str): Title to display at top of output
|
|
168
|
+
"""
|
|
169
|
+
console(self.get_call_chain_string(title))
|
|
170
|
+
|
|
171
|
+
def _append_instance_tree(self, instance, depth, lines):
|
|
172
|
+
"""
|
|
173
|
+
Recursively append instance tree with proper indentation to lines list
|
|
174
|
+
|
|
175
|
+
Args:
|
|
176
|
+
instance: Runtime instance to append
|
|
177
|
+
depth (int): Current depth level for indentation
|
|
178
|
+
lines (list): List to append formatted lines to
|
|
179
|
+
"""
|
|
180
|
+
from dolphin.core.runtime.runtime_instance import TypeRuntimeInstance
|
|
181
|
+
|
|
182
|
+
# Create indentation
|
|
183
|
+
indent = " " * depth
|
|
184
|
+
tree_char = "├─" if depth > 0 else ""
|
|
185
|
+
|
|
186
|
+
# Format instance info based on type
|
|
187
|
+
if instance.type == TypeRuntimeInstance.AGENT:
|
|
188
|
+
icon = "🤖"
|
|
189
|
+
name = getattr(instance, "name", "Unknown")
|
|
190
|
+
detail = f"Agent[{name}]"
|
|
191
|
+
|
|
192
|
+
elif instance.type == TypeRuntimeInstance.BLOCK:
|
|
193
|
+
icon = "📦"
|
|
194
|
+
name = getattr(instance, "name", "Unknown")
|
|
195
|
+
detail = f"Block[{name}]"
|
|
196
|
+
|
|
197
|
+
elif instance.type == TypeRuntimeInstance.PROGRESS:
|
|
198
|
+
icon = "⚡"
|
|
199
|
+
stage_count = len(getattr(instance, "stages", []))
|
|
200
|
+
detail = f"Progress[{instance.id[:8]}] ({stage_count} stages)"
|
|
201
|
+
|
|
202
|
+
elif instance.type == TypeRuntimeInstance.STAGE:
|
|
203
|
+
icon = "🔄"
|
|
204
|
+
stage_type = getattr(instance, "stage", "Unknown")
|
|
205
|
+
status = getattr(instance, "status", "Unknown")
|
|
206
|
+
# Handle stage_type safely
|
|
207
|
+
if hasattr(stage_type, "value"):
|
|
208
|
+
stage_value = getattr(stage_type, "value")
|
|
209
|
+
else:
|
|
210
|
+
stage_value = str(stage_type)
|
|
211
|
+
|
|
212
|
+
if stage_type == TypeStage.SKILL:
|
|
213
|
+
detail = (
|
|
214
|
+
f"Stage[{instance.id[:8]}/{stage_value}/{instance.skill_info.name}]"
|
|
215
|
+
)
|
|
216
|
+
else:
|
|
217
|
+
detail = f"Stage[{instance.id[:8]}/{stage_value}]"
|
|
218
|
+
|
|
219
|
+
detail = f"{detail} - time[{instance.end_time - instance.start_time:.2f}s]"
|
|
220
|
+
|
|
221
|
+
# Check if this is an LLM stage
|
|
222
|
+
is_llm = self.is_llm_stage(instance)
|
|
223
|
+
|
|
224
|
+
if is_llm:
|
|
225
|
+
detail = f"{detail} - estimated_input[{instance.get_estimated_input_tokens()}]"
|
|
226
|
+
detail = f"{detail} - estimated_output[{instance.get_estimated_output_tokens()}]"
|
|
227
|
+
|
|
228
|
+
# Change assert to warning for incomplete stages
|
|
229
|
+
if status != Status.COMPLETED:
|
|
230
|
+
detail = f"{detail} - WARNING: Status[{status}] (not completed)"
|
|
231
|
+
else:
|
|
232
|
+
detail = f"{status}"
|
|
233
|
+
|
|
234
|
+
else:
|
|
235
|
+
icon = "❓"
|
|
236
|
+
detail = f"Unknown[{instance.type}]"
|
|
237
|
+
|
|
238
|
+
lines.append(f"{indent}{tree_char} {icon} {detail}")
|
|
239
|
+
|
|
240
|
+
# Append children recursively
|
|
241
|
+
for child in instance.children:
|
|
242
|
+
self._append_instance_tree(child, depth + 1, lines)
|
|
243
|
+
|
|
244
|
+
def _split_content_by_keywords(self, content: str) -> list:
|
|
245
|
+
"""Split content by special keywords and calculate ratios."""
|
|
246
|
+
if not content or not content.strip():
|
|
247
|
+
return [("", 0.0)]
|
|
248
|
+
|
|
249
|
+
# Replace newlines with \n for display
|
|
250
|
+
content = content.replace("\n", "\\n")
|
|
251
|
+
|
|
252
|
+
# For very short content, don't split
|
|
253
|
+
if len(content) <= 100:
|
|
254
|
+
return [(content, 100.0)]
|
|
255
|
+
|
|
256
|
+
# Initialize result list to store (line, ratio) tuples
|
|
257
|
+
result = []
|
|
258
|
+
|
|
259
|
+
# Split by special patterns - handle tool calls as complete blocks
|
|
260
|
+
parts = []
|
|
261
|
+
current = []
|
|
262
|
+
lines = content.split("\\n")
|
|
263
|
+
in_tool_call = False
|
|
264
|
+
|
|
265
|
+
for line in lines:
|
|
266
|
+
# Check for tool call start
|
|
267
|
+
if line.startswith("=>#"):
|
|
268
|
+
in_tool_call = True
|
|
269
|
+
if current:
|
|
270
|
+
parts.append("\\n".join(current))
|
|
271
|
+
current = []
|
|
272
|
+
current.append(line)
|
|
273
|
+
# Check for tool call end (answer tag)
|
|
274
|
+
elif in_tool_call and line.endswith("</answer>"):
|
|
275
|
+
current.append(line)
|
|
276
|
+
parts.append("\\n".join(current))
|
|
277
|
+
current = []
|
|
278
|
+
in_tool_call = False
|
|
279
|
+
# Check for other special patterns when not in tool call
|
|
280
|
+
elif not in_tool_call and (
|
|
281
|
+
line.startswith("##") or line.startswith("```") or line.endswith("```")
|
|
282
|
+
):
|
|
283
|
+
if current:
|
|
284
|
+
parts.append("\\n".join(current))
|
|
285
|
+
current = []
|
|
286
|
+
parts.append(line)
|
|
287
|
+
else:
|
|
288
|
+
current.append(line)
|
|
289
|
+
|
|
290
|
+
if current:
|
|
291
|
+
parts.append("\\n".join(current))
|
|
292
|
+
|
|
293
|
+
# Calculate total length for ratio calculation
|
|
294
|
+
total_length = len(content)
|
|
295
|
+
if total_length == 0:
|
|
296
|
+
return [("", 0.0)]
|
|
297
|
+
|
|
298
|
+
# Calculate ratio for each part
|
|
299
|
+
for part in parts:
|
|
300
|
+
if part.strip():
|
|
301
|
+
ratio = len(part) / total_length * 100
|
|
302
|
+
result.append((part, ratio))
|
|
303
|
+
|
|
304
|
+
return result if result else [(content, 100.0)]
|
|
305
|
+
|
|
306
|
+
def _build_structured_call_chain_node(self, instance) -> Dict[str, Any]:
|
|
307
|
+
"""
|
|
308
|
+
Recursively build a structured dictionary representation of the call chain node.
|
|
309
|
+
"""
|
|
310
|
+
from dolphin.core.runtime.runtime_instance import TypeRuntimeInstance
|
|
311
|
+
|
|
312
|
+
node_data = {
|
|
313
|
+
"type": instance.type.value,
|
|
314
|
+
"id": instance.id[:8],
|
|
315
|
+
"name": getattr(instance, "name", None),
|
|
316
|
+
"status": getattr(instance, "status", None).value if getattr(instance, "status", None) else None,
|
|
317
|
+
"duration": (instance.end_time - instance.start_time) if instance.end_time and instance.start_time else 0.0,
|
|
318
|
+
"children": [],
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
if instance.type == TypeRuntimeInstance.AGENT:
|
|
322
|
+
node_data["name"] = getattr(instance, "name", "Unknown")
|
|
323
|
+
elif instance.type == TypeRuntimeInstance.BLOCK:
|
|
324
|
+
node_data["name"] = getattr(instance, "name", "Unknown")
|
|
325
|
+
elif instance.type == TypeRuntimeInstance.PROGRESS:
|
|
326
|
+
node_data["stage_count"] = len(getattr(instance, "stages", []))
|
|
327
|
+
elif instance.type == TypeRuntimeInstance.STAGE:
|
|
328
|
+
stage_type_val = getattr(instance, "stage", "Unknown")
|
|
329
|
+
if hasattr(stage_type_val, "value"):
|
|
330
|
+
stage_type_val = stage_type_val.value
|
|
331
|
+
node_data["stage_type"] = stage_type_val
|
|
332
|
+
node_data["skill_name"] = instance.skill_info.name if stage_type_val == TypeStage.SKILL and instance.skill_info else None
|
|
333
|
+
|
|
334
|
+
is_llm = self.is_llm_stage(instance)
|
|
335
|
+
if is_llm:
|
|
336
|
+
node_data["is_llm_stage"] = True
|
|
337
|
+
node_data["estimated_input_tokens"] = instance.get_estimated_input_tokens()
|
|
338
|
+
node_data["estimated_output_tokens"] = instance.get_estimated_output_tokens()
|
|
339
|
+
if hasattr(instance, "input") and instance.input:
|
|
340
|
+
node_data["input_content"] = getattr(instance.input, "content", None)
|
|
341
|
+
node_data["input_messages"] = [msg.to_dict() for msg in instance.input.messages.get_messages()] if getattr(instance.input, "messages", None) else []
|
|
342
|
+
if hasattr(instance, "output") and instance.output:
|
|
343
|
+
node_data["answer"] = getattr(instance.output, "answer", None)
|
|
344
|
+
node_data["think"] = getattr(instance.output, "think", None)
|
|
345
|
+
node_data["raw_output"] = getattr(instance.output, "raw_output", None)
|
|
346
|
+
else:
|
|
347
|
+
node_data["is_llm_stage"] = False
|
|
348
|
+
|
|
349
|
+
for child in instance.children:
|
|
350
|
+
node_data["children"].append(self._build_structured_call_chain_node(child))
|
|
351
|
+
|
|
352
|
+
return node_data
|
|
353
|
+
|
|
354
|
+
def _is_skill_stage(self, instance):
|
|
355
|
+
"""Helper to identify skill stages."""
|
|
356
|
+
from dolphin.core.runtime.runtime_instance import TypeRuntimeInstance
|
|
357
|
+
if instance.type != TypeRuntimeInstance.STAGE:
|
|
358
|
+
return False
|
|
359
|
+
stage_value = getattr(instance, "stage", None)
|
|
360
|
+
return (stage_value == TypeStage.SKILL) or (isinstance(stage_value, str) and stage_value.lower() == "skill")
|
|
361
|
+
|
|
362
|
+
def profile(self, title="Dolphin Runtime Profile") -> Dict[str, Any]:
|
|
363
|
+
"""
|
|
364
|
+
Generate comprehensive profile information including call chain and LLM details as a structured dictionary.
|
|
365
|
+
|
|
366
|
+
Args:
|
|
367
|
+
title (str): Title for the profile
|
|
368
|
+
|
|
369
|
+
Returns:
|
|
370
|
+
Dict[str, Any]: Comprehensive profile as a structured dictionary.
|
|
371
|
+
"""
|
|
372
|
+
from dolphin.core.runtime.runtime_instance import TypeRuntimeInstance
|
|
373
|
+
|
|
374
|
+
profile_data = {
|
|
375
|
+
"title": title,
|
|
376
|
+
"call_chain": [],
|
|
377
|
+
"llm_interactions": [],
|
|
378
|
+
"llm_summary": {},
|
|
379
|
+
"skill_interactions": [],
|
|
380
|
+
"skill_summary": {},
|
|
381
|
+
"execution_summary": {},
|
|
382
|
+
"context_information": {},
|
|
383
|
+
}
|
|
384
|
+
|
|
385
|
+
# --- Call Chain ---
|
|
386
|
+
root_instances = [
|
|
387
|
+
instance for instance in self.visible_instances if instance.parent is None
|
|
388
|
+
]
|
|
389
|
+
for root in root_instances:
|
|
390
|
+
profile_data["call_chain"].append(self._build_structured_call_chain_node(root))
|
|
391
|
+
|
|
392
|
+
# --- LLM and Skill Stages Collection ---
|
|
393
|
+
def _collect_all_stages_from_tree():
|
|
394
|
+
roots = [i for i in self.visible_instances if i.parent is None]
|
|
395
|
+
collected = []
|
|
396
|
+
def dfs(node):
|
|
397
|
+
if node.type == TypeRuntimeInstance.STAGE:
|
|
398
|
+
collected.append(node)
|
|
399
|
+
for child in getattr(node, "children", []) or []:
|
|
400
|
+
dfs(child)
|
|
401
|
+
for r in roots:
|
|
402
|
+
dfs(r)
|
|
403
|
+
return collected
|
|
404
|
+
|
|
405
|
+
stages_from_tree = _collect_all_stages_from_tree()
|
|
406
|
+
llm_stages = [i for i in stages_from_tree if self.is_llm_stage(i)]
|
|
407
|
+
skill_stages = [i for i in stages_from_tree if self._is_skill_stage(i)]
|
|
408
|
+
|
|
409
|
+
# --- LLM Interactions Details ---
|
|
410
|
+
total_input_tokens = 0
|
|
411
|
+
total_output_tokens = 0
|
|
412
|
+
total_llm_time = 0
|
|
413
|
+
|
|
414
|
+
for stage in llm_stages:
|
|
415
|
+
# Status is only PROCESSING / COMPLETED / FAILED, unknown status falls back to string
|
|
416
|
+
raw_status = getattr(stage, "status", None)
|
|
417
|
+
status_value = getattr(raw_status, "value", str(raw_status)) if raw_status is not None else "unknown"
|
|
418
|
+
stage_data = {
|
|
419
|
+
"id": stage.id[:8],
|
|
420
|
+
"duration": stage.end_time - stage.start_time,
|
|
421
|
+
"status": status_value,
|
|
422
|
+
"input_tokens": stage.get_estimated_input_tokens(),
|
|
423
|
+
"output_tokens": stage.get_estimated_output_tokens(),
|
|
424
|
+
"input_content": getattr(stage.input, "content", None) if hasattr(stage, "input") else None,
|
|
425
|
+
"input_messages": [msg.to_dict() for msg in stage.input.messages.get_messages()] if hasattr(stage, "input") and getattr(stage.input, "messages", None) else [],
|
|
426
|
+
"answer": getattr(stage.output, "answer", None) if hasattr(stage, "output") else None,
|
|
427
|
+
"think": getattr(stage.output, "think", None) if hasattr(stage, "output") else None,
|
|
428
|
+
"raw_output": getattr(stage.output, "raw_output", None) if hasattr(stage, "output") else None,
|
|
429
|
+
}
|
|
430
|
+
profile_data["llm_interactions"].append(stage_data)
|
|
431
|
+
|
|
432
|
+
total_input_tokens += stage_data["input_tokens"]
|
|
433
|
+
total_output_tokens += stage_data["output_tokens"]
|
|
434
|
+
total_llm_time += stage_data["duration"]
|
|
435
|
+
|
|
436
|
+
profile_data["llm_summary"] = {
|
|
437
|
+
"total_stages": len(llm_stages),
|
|
438
|
+
"total_llm_time": total_llm_time,
|
|
439
|
+
"total_input_tokens": total_input_tokens,
|
|
440
|
+
"total_output_tokens": total_output_tokens,
|
|
441
|
+
"total_tokens": total_input_tokens + total_output_tokens,
|
|
442
|
+
"avg_tokens_per_sec": (total_input_tokens + total_output_tokens) / total_llm_time if total_llm_time > 0 else 0,
|
|
443
|
+
}
|
|
444
|
+
|
|
445
|
+
# --- Skill Interactions Details ---
|
|
446
|
+
skill_details_list = []
|
|
447
|
+
skill_summary_dict = {}
|
|
448
|
+
total_skill_time = 0
|
|
449
|
+
|
|
450
|
+
for stage in skill_stages:
|
|
451
|
+
skill_name = getattr(stage.skill_info, "name", "Unknown") if hasattr(stage, "skill_info") else "Unknown"
|
|
452
|
+
stage_time = stage.end_time - stage.start_time
|
|
453
|
+
raw_status = getattr(stage, "status", None)
|
|
454
|
+
status_value = getattr(raw_status, "value", str(raw_status)) if raw_status is not None else "unknown"
|
|
455
|
+
|
|
456
|
+
skill_details_list.append({
|
|
457
|
+
"id": stage.id[:8],
|
|
458
|
+
"name": skill_name,
|
|
459
|
+
"duration": stage_time,
|
|
460
|
+
"status": status_value,
|
|
461
|
+
})
|
|
462
|
+
|
|
463
|
+
if skill_name not in skill_summary_dict:
|
|
464
|
+
skill_summary_dict[skill_name] = {"count": 0, "total_time": 0.0}
|
|
465
|
+
skill_summary_dict[skill_name]["count"] += 1
|
|
466
|
+
skill_summary_dict[skill_name]["total_time"] += stage_time
|
|
467
|
+
total_skill_time += stage_time
|
|
468
|
+
|
|
469
|
+
profile_data["skill_interactions"] = skill_details_list
|
|
470
|
+
profile_data["skill_summary"] = {
|
|
471
|
+
"total_stages": len(skill_stages),
|
|
472
|
+
"total_skill_time": total_skill_time,
|
|
473
|
+
"details_by_skill": skill_summary_dict,
|
|
474
|
+
}
|
|
475
|
+
|
|
476
|
+
# --- Execution Summary ---
|
|
477
|
+
all_stages = stages_from_tree
|
|
478
|
+
total_execution_time = sum(
|
|
479
|
+
stage.end_time - stage.start_time for stage in all_stages if stage.start_time and stage.end_time
|
|
480
|
+
)
|
|
481
|
+
profile_data["execution_summary"] = {
|
|
482
|
+
"total_stages": len(all_stages),
|
|
483
|
+
"total_execution_time": total_execution_time,
|
|
484
|
+
}
|
|
485
|
+
|
|
486
|
+
# --- Context Information ---
|
|
487
|
+
context = None
|
|
488
|
+
for instance in self.visible_instances:
|
|
489
|
+
if hasattr(instance, "context") and instance.context:
|
|
490
|
+
context = instance.context
|
|
491
|
+
break
|
|
492
|
+
|
|
493
|
+
context_info = {}
|
|
494
|
+
if context:
|
|
495
|
+
context_info["user_id"] = getattr(context, "user_id", "N/A")
|
|
496
|
+
context_info["session_id"] = getattr(context, "session_id", "N/A")
|
|
497
|
+
if hasattr(context, "memory_manager") and context.memory_manager:
|
|
498
|
+
memory_config = getattr(context.memory_manager, "config", None)
|
|
499
|
+
if memory_config and hasattr(memory_config, "memory_config"):
|
|
500
|
+
mem_enabled = getattr(memory_config.memory_config, "enabled", False)
|
|
501
|
+
context_info["memory_enabled"] = mem_enabled
|
|
502
|
+
if mem_enabled:
|
|
503
|
+
context_info["max_knowledge_points"] = getattr(memory_config.memory_config, "max_knowledge_points", "N/A")
|
|
504
|
+
if hasattr(context, "variable_pool"):
|
|
505
|
+
context_info["variables_count"] = len(context.variable_pool.get_all_variables())
|
|
506
|
+
profile_data["context_information"] = context_info
|
|
507
|
+
|
|
508
|
+
return profile_data
|
|
509
|
+
|
|
510
|
+
def print_profile(self, title="Dolphin Runtime Profile", mode="brief"):
|
|
511
|
+
"""
|
|
512
|
+
Print human-readable profile visualization
|
|
513
|
+
|
|
514
|
+
Args:
|
|
515
|
+
title (str): Title to display at top of output
|
|
516
|
+
"""
|
|
517
|
+
from dolphin.core.runtime.runtime_instance import TypeRuntimeInstance
|
|
518
|
+
|
|
519
|
+
# For backward compatibility, format the structured profile into a string for console printing.
|
|
520
|
+
structured_profile = self.profile(title)
|
|
521
|
+
|
|
522
|
+
lines = []
|
|
523
|
+
lines.append(f"{'=' * 80}")
|
|
524
|
+
lines.append(f"{structured_profile['title']:^80}")
|
|
525
|
+
lines.append(f"{'=' * 80}")
|
|
526
|
+
|
|
527
|
+
# --- Call Chain ---
|
|
528
|
+
lines.append("\n📊 CALL CHAIN OVERVIEW")
|
|
529
|
+
lines.append("-" * 40)
|
|
530
|
+
# Reconstruct string from structured call chain
|
|
531
|
+
def _format_structured_call_chain(nodes: List[Dict[str, Any]], depth: int = 0, prefix: str = ""):
|
|
532
|
+
formatted_lines = []
|
|
533
|
+
for node in nodes:
|
|
534
|
+
indent = " " * depth
|
|
535
|
+
icon_map = {
|
|
536
|
+
TypeRuntimeInstance.AGENT.value: "🤖",
|
|
537
|
+
TypeRuntimeInstance.BLOCK.value: "📦",
|
|
538
|
+
TypeRuntimeInstance.PROGRESS.value: "⚡",
|
|
539
|
+
TypeRuntimeInstance.STAGE.value: "🔄",
|
|
540
|
+
}
|
|
541
|
+
icon = icon_map.get(node['type'], "❓")
|
|
542
|
+
|
|
543
|
+
detail = f"{node['type'].capitalize()}[{node['name'] or node['id']}]"
|
|
544
|
+
if node['type'] == TypeRuntimeInstance.PROGRESS.value:
|
|
545
|
+
detail = f"Progress[{node['id']}] ({node['stage_count']} stages)"
|
|
546
|
+
elif node['type'] == TypeRuntimeInstance.STAGE.value:
|
|
547
|
+
detail = f"Stage[{node['id']}/{node['stage_type']}]"
|
|
548
|
+
if node['skill_name']:
|
|
549
|
+
detail += f"/{node['skill_name']}"
|
|
550
|
+
detail += f" - time[{node['duration']:.2f}s]"
|
|
551
|
+
if node['is_llm_stage']:
|
|
552
|
+
detail += f" - estimated_input[{node['estimated_input_tokens']}]"
|
|
553
|
+
detail += f" - estimated_output[{node['estimated_output_tokens']}]"
|
|
554
|
+
status_text = node['status']
|
|
555
|
+
if status_text != Status.COMPLETED.value:
|
|
556
|
+
detail += f" - WARNING: Status[{status_text}] (not completed)"
|
|
557
|
+
else:
|
|
558
|
+
detail += f" - Status[{status_text}]"
|
|
559
|
+
|
|
560
|
+
formatted_lines.append(f"{indent}{prefix}{icon} {detail}")
|
|
561
|
+
if node['children']:
|
|
562
|
+
formatted_lines.extend(_format_structured_call_chain(node['children'], depth + 1, " "))
|
|
563
|
+
return formatted_lines
|
|
564
|
+
|
|
565
|
+
lines.extend(_format_structured_call_chain(structured_profile['call_chain']))
|
|
566
|
+
|
|
567
|
+
# --- LLM Interactions ---
|
|
568
|
+
if structured_profile['llm_interactions']:
|
|
569
|
+
lines.append(f"\n🤖 LLM INTERACTION DETAILS ({structured_profile['llm_summary']['total_stages']} stages)")
|
|
570
|
+
lines.append("-" * 40)
|
|
571
|
+
for stage_data in structured_profile['llm_interactions']:
|
|
572
|
+
lines.append(f"\n📝 LLM Stage {stage_data['id']}")
|
|
573
|
+
lines.append(f" Duration: {stage_data['duration']:.2f}s")
|
|
574
|
+
lines.append(f" Status: {stage_data['status']}")
|
|
575
|
+
lines.append(f" Input Tokens: {stage_data['input_tokens']}")
|
|
576
|
+
lines.append(f" Output Tokens: {stage_data['output_tokens']}")
|
|
577
|
+
|
|
578
|
+
if stage_data['input_content']:
|
|
579
|
+
content_preview = stage_data['input_content'][:500].replace("\n", "\\n")
|
|
580
|
+
if len(stage_data['input_content']) > 500:
|
|
581
|
+
content_preview += "..."
|
|
582
|
+
lines.append(f" 💬 Input Content: {content_preview}")
|
|
583
|
+
|
|
584
|
+
if stage_data['input_messages']:
|
|
585
|
+
lines.append(f" 📨 Input Messages: {len(stage_data['input_messages'])} messages")
|
|
586
|
+
lines.append("\n 📝 Messages Table:")
|
|
587
|
+
lines.append(" " + "-" * 100)
|
|
588
|
+
lines.append(" | Role | Size (%) | Content")
|
|
589
|
+
lines.append(" |" + "-" * 11 + "|" + "-" * 10 + "|" + "-" * 78)
|
|
590
|
+
|
|
591
|
+
total_msg_length = sum(len(m['content']) for m in stage_data['input_messages'] if m['content'])
|
|
592
|
+
for msg in stage_data['input_messages']:
|
|
593
|
+
role = msg['role']
|
|
594
|
+
msg_ratio = (len(msg['content']) / total_msg_length * 100) if total_msg_length > 0 and msg['content'] else 0.0
|
|
595
|
+
|
|
596
|
+
content_parts = self._split_content_by_keywords(msg['content'])
|
|
597
|
+
|
|
598
|
+
has_printed_header = False
|
|
599
|
+
if content_parts:
|
|
600
|
+
content, _ = content_parts[0]
|
|
601
|
+
if mode == "brief":
|
|
602
|
+
content = content[:76] + ("..." if len(content) > 76 else "")
|
|
603
|
+
lines.append(f" | {role:<9} | {msg_ratio:>7.1f}% | {content}")
|
|
604
|
+
has_printed_header = True
|
|
605
|
+
for content, _ in content_parts[1:]:
|
|
606
|
+
if mode == "brief":
|
|
607
|
+
content = content[:76] + ("..." if len(content) > 76 else "")
|
|
608
|
+
lines.append(f" | {' ' * 9} | {' ' * 8} | {content}")
|
|
609
|
+
|
|
610
|
+
if msg.get('tool_calls'):
|
|
611
|
+
import json
|
|
612
|
+
for tool_call in msg['tool_calls']:
|
|
613
|
+
tool_name = tool_call.get('function', {}).get('name', 'N/A')
|
|
614
|
+
raw_args = tool_call.get('function', {}).get('arguments', '{}')
|
|
615
|
+
tool_args_short = ""
|
|
616
|
+
try:
|
|
617
|
+
parsed_args = json.loads(raw_args)
|
|
618
|
+
if isinstance(parsed_args, dict):
|
|
619
|
+
key_args = []
|
|
620
|
+
for k, v in parsed_args.items():
|
|
621
|
+
if len(key_args) >= 2: key_args.append("..."); break
|
|
622
|
+
value_str = str(v)[:20]
|
|
623
|
+
if len(str(v)) > 20: value_str += "..."
|
|
624
|
+
key_args.append(f"{k}={value_str}")
|
|
625
|
+
tool_args_short = ", ".join(key_args)
|
|
626
|
+
else:
|
|
627
|
+
tool_args_short = str(parsed_args)[:30]
|
|
628
|
+
except (json.JSONDecodeError, TypeError):
|
|
629
|
+
tool_args_short = str(raw_args)[:30]
|
|
630
|
+
|
|
631
|
+
content_display = f"🔧 {tool_name}({tool_args_short})" if tool_args_short else f"🔧 {tool_name}()"
|
|
632
|
+
if mode == "brief":
|
|
633
|
+
content_display = content_display[:76] + ("..." if len(content_display) > 76 else "")
|
|
634
|
+
|
|
635
|
+
if not has_printed_header:
|
|
636
|
+
lines.append(f" | {role:<9} | {msg_ratio:>7.1f}% | {content_display}")
|
|
637
|
+
has_printed_header = True
|
|
638
|
+
else:
|
|
639
|
+
lines.append(f" | {'':<9} | {' ' * 8} | {content_display}")
|
|
640
|
+
|
|
641
|
+
if not has_printed_header:
|
|
642
|
+
lines.append(f" | {role:<9} | {msg_ratio:>7.1f}% | [Empty Content]")
|
|
643
|
+
|
|
644
|
+
lines.append(" |" + "-" * 11 + "|" + "-" * 10 + "|" + "-" * 78)
|
|
645
|
+
|
|
646
|
+
if stage_data['answer']:
|
|
647
|
+
lines.append("\n 🎯 Answer:")
|
|
648
|
+
lines.append(" " + "-" * 80)
|
|
649
|
+
for line in stage_data['answer'].split("\n"): lines.append(f" {line}")
|
|
650
|
+
lines.append(" " + "-" * 80)
|
|
651
|
+
if stage_data['think']:
|
|
652
|
+
lines.append("\n 💭 Think:")
|
|
653
|
+
lines.append(" " + "-" * 60)
|
|
654
|
+
for line in stage_data['think'].split("\n"): lines.append(f" {line}")
|
|
655
|
+
lines.append(" " + "-" * 60)
|
|
656
|
+
if stage_data['raw_output'] and stage_data['raw_output'] != stage_data['answer']:
|
|
657
|
+
lines.append("\n 📄 Raw Output:")
|
|
658
|
+
lines.append(" " + "-" * 70)
|
|
659
|
+
for line in stage_data['raw_output'].split("\n"): lines.append(f" {line}")
|
|
660
|
+
lines.append(" " + "-" * 70)
|
|
661
|
+
|
|
662
|
+
llm_summary = structured_profile['llm_summary']
|
|
663
|
+
lines.append("\n📈 LLM SUMMARY")
|
|
664
|
+
lines.append(f" 🔢 Total Stages: {llm_summary['total_stages']}")
|
|
665
|
+
lines.append(f" ⏱️ Total LLM Time: {llm_summary['total_llm_time']:.2f}s")
|
|
666
|
+
lines.append(f" 📥 Total Input Tokens: {llm_summary['total_input_tokens']}")
|
|
667
|
+
lines.append(f" 📤 Total Output Tokens: {llm_summary['total_output_tokens']}")
|
|
668
|
+
lines.append(f" 💰 Total Tokens: {llm_summary['total_tokens']}")
|
|
669
|
+
if llm_summary['total_llm_time'] > 0:
|
|
670
|
+
lines.append(f" 🚀 Avg Tokens/sec: {llm_summary['avg_tokens_per_sec']:.2f}")
|
|
671
|
+
|
|
672
|
+
# --- Skill Interactions ---
|
|
673
|
+
if structured_profile['skill_interactions']:
|
|
674
|
+
lines.append(f"\n🛠️ SKILL INTERACTION SUMMARY ({structured_profile['skill_summary']['total_stages']} stages)")
|
|
675
|
+
lines.append("-" * 40)
|
|
676
|
+
for skill_name, stats in structured_profile['skill_summary']['details_by_skill'].items():
|
|
677
|
+
lines.append(f" 🔧 {skill_name}: {stats['count']} calls, {stats['total_time']:.2f}s")
|
|
678
|
+
lines.append(f" ⏱️ Total Skill Time: {structured_profile['skill_summary']['total_skill_time']:.2f}s")
|
|
679
|
+
|
|
680
|
+
# --- Execution Summary ---
|
|
681
|
+
exec_summary = structured_profile['execution_summary']
|
|
682
|
+
lines.append("\n🎯 EXECUTION SUMMARY")
|
|
683
|
+
lines.append("-" * 40)
|
|
684
|
+
lines.append(f" 📊 Total Stages: {exec_summary['total_stages']}")
|
|
685
|
+
lines.append(f" ⏱️ Total Execution Time: {exec_summary['total_execution_time']:.2f}s")
|
|
686
|
+
|
|
687
|
+
# --- Context Information ---
|
|
688
|
+
context_info = structured_profile['context_information']
|
|
689
|
+
lines.append("\n🔧 CONTEXT INFORMATION")
|
|
690
|
+
lines.append("-" * 40)
|
|
691
|
+
if context_info:
|
|
692
|
+
lines.append(f" 👤 User ID: {context_info.get('user_id', 'N/A')}")
|
|
693
|
+
lines.append(f" 🔗 Session ID: {context_info.get('session_id', 'N/A')}")
|
|
694
|
+
if context_info.get('memory_enabled') is not None:
|
|
695
|
+
lines.append(f" 🧠 Memory Enabled: {context_info['memory_enabled']}")
|
|
696
|
+
if context_info['memory_enabled']:
|
|
697
|
+
lines.append(f" 📚 Max Knowledge Points: {context_info.get('max_knowledge_points', 'N/A')}")
|
|
698
|
+
lines.append(f" 🗂️ Variables Count: {context_info.get('variables_count', 0)}")
|
|
699
|
+
else:
|
|
700
|
+
lines.append(" ❌ No context information available")
|
|
701
|
+
|
|
702
|
+
lines.append(f"\n{'=' * 80}")
|
|
703
|
+
|
|
704
|
+
console("\n".join(lines))
|
|
705
|
+
return "\n".join(lines)
|
|
706
|
+
|
|
707
|
+
|
|
708
|
+
def get_call_chain_summary(self):
    """
    Get a concise summary of the call chain as a dictionary

    Returns:
        dict: Summary statistics and key information
    """
    from dolphin.core.runtime.runtime_instance import TypeRuntimeInstance

    # Bucket every visible instance by runtime type in a single pass.
    buckets = {
        TypeRuntimeInstance.AGENT: [],
        TypeRuntimeInstance.BLOCK: [],
        TypeRuntimeInstance.PROGRESS: [],
        TypeRuntimeInstance.STAGE: [],
    }
    for instance in self.visible_instances:
        if instance.type in buckets:
            buckets[instance.type].append(instance)

    agent_list = buckets[TypeRuntimeInstance.AGENT]
    block_list = buckets[TypeRuntimeInstance.BLOCK]

    return {
        "total_instances": len(self.visible_instances),
        "agents": len(agent_list),
        "blocks": len(block_list),
        "progresses": len(buckets[TypeRuntimeInstance.PROGRESS]),
        "stages": len(buckets[TypeRuntimeInstance.STAGE]),
        # Fall back to "Unknown" for instances without a name attribute.
        "agent_names": [getattr(a, "name", "Unknown") for a in agent_list],
        "block_types": [getattr(b, "name", "Unknown") for b in block_list],
    }
|
|
739
|
+
|
|
740
|
+
def diagnose_runtime_issues(self):
    """
    Diagnose common runtime issues and provide suggestions

    Checks for duplicate agents, agents parented under stages, incomplete
    stages, blocks lacking a progress child, and LLM stages with zero
    estimated input tokens.

    Returns:
        dict: Diagnostic information and suggestions, with keys
            "issues" (list[str]), "suggestions" (list[str]),
            "summary" (boolean flags + total count), and "stats" (counters).
    """
    from collections import Counter

    from dolphin.core.runtime.runtime_instance import TypeRuntimeInstance

    issues = []
    suggestions = []

    # Check for duplicate agent instances.
    agents = [
        i for i in self.visible_instances if i.type == TypeRuntimeInstance.AGENT
    ]
    agent_names = [getattr(a, "name", "Unknown") for a in agents]
    # Counter gives O(n) duplicate detection (the previous list.count()-per-name
    # scan was O(n^2)) and reports duplicates in deterministic first-seen order.
    name_counts = Counter(agent_names)
    unique_duplicates = [name for name, count in name_counts.items() if count > 1]

    if unique_duplicates:
        issues.append(f"Found duplicate agent instances: {unique_duplicates}")
        suggestions.append(
            "Check agent calling logic to avoid creating duplicate agent instances"
        )

    # Check for agents with incorrect parent hierarchy (agents should not be children of stages)
    agents_with_stage_parents = []
    for agent in agents:
        if agent.parent and agent.parent.type == TypeRuntimeInstance.STAGE:
            agents_with_stage_parents.append(getattr(agent, "name", "Unknown"))

    if agents_with_stage_parents:
        issues.append(
            f"Found agents as children of stages (incorrect hierarchy): {agents_with_stage_parents}"
        )
        suggestions.append(
            "Agents should be root-level or children of other agents, never children of stages"
        )

    # Check for incomplete stages (anything whose status is not COMPLETED).
    stages = [
        i for i in self.visible_instances if i.type == TypeRuntimeInstance.STAGE
    ]
    incomplete_stages = [
        s for s in stages if getattr(s, "status", None) != Status.COMPLETED
    ]

    if incomplete_stages:
        issues.append(f"Found {len(incomplete_stages)} incomplete stages")
        suggestions.append(
            "Ensure all stages are properly completed with recorder.update(is_completed=True)"
        )

    # Check for missing progresses in blocks
    blocks = [
        i for i in self.visible_instances if i.type == TypeRuntimeInstance.BLOCK
    ]
    block_without_progress = []

    for block in blocks:
        has_progress_child = any(
            child.type == TypeRuntimeInstance.PROGRESS for child in block.children
        )
        if not has_progress_child:
            block_without_progress.append(getattr(block, "name", "Unknown"))

    if block_without_progress:
        issues.append(f"Blocks without progress: {block_without_progress}")
        suggestions.append(
            "Check if progress instances are properly created and registered"
        )

    # Check for LLM stages with zero input tokens — usually a sign that
    # whole_messages/input_message were never recorded on the stage.
    llm_stages = [s for s in stages if self.is_llm_stage(s)]
    zero_token_stages = [
        s for s in llm_stages if s.get_estimated_input_tokens() == 0
    ]

    if zero_token_stages:
        issues.append(
            f"Found {len(zero_token_stages)} LLM stages with 0 input tokens"
        )
        suggestions.append(
            "Ensure whole_messages or input_message are properly set for LLM stages"
        )

    return {
        "issues": issues,
        "suggestions": suggestions,
        # summary flags are derived from the issue message texts above.
        "summary": {
            "total_issues": len(issues),
            "has_duplicate_agents": any(
                "duplicate agent" in issue.lower() for issue in issues
            ),
            "has_hierarchy_issues": any(
                "hierarchy" in issue.lower() for issue in issues
            ),
            "has_incomplete_stages": any(
                "incomplete stages" in issue.lower() for issue in issues
            ),
        },
        "stats": {
            "total_stages": len(stages),
            "incomplete_stages": len(incomplete_stages),
            "blocks_without_progress": len(block_without_progress),
            "llm_stages_zero_tokens": len(zero_token_stages),
        },
    }
|
|
849
|
+
|
|
850
|
+
def print_runtime_health_check(self, title="Dolphin Runtime Health Check"):
|
|
851
|
+
"""
|
|
852
|
+
Print a comprehensive runtime health check report
|
|
853
|
+
|
|
854
|
+
Args:
|
|
855
|
+
title (str): Title for the health check report
|
|
856
|
+
"""
|
|
857
|
+
lines = []
|
|
858
|
+
lines.append(f"{'=' * 80}")
|
|
859
|
+
lines.append(f"{title:^80}")
|
|
860
|
+
lines.append(f"{'=' * 80}")
|
|
861
|
+
|
|
862
|
+
# Get diagnostic results
|
|
863
|
+
diagnostic = self.diagnose_runtime_issues()
|
|
864
|
+
issues = diagnostic.get("issues", [])
|
|
865
|
+
suggestions = diagnostic.get("suggestions", [])
|
|
866
|
+
summary = diagnostic.get("summary", {})
|
|
867
|
+
stats = diagnostic.get("stats", {})
|
|
868
|
+
|
|
869
|
+
# Overall health status
|
|
870
|
+
total_issues = summary.get("total_issues", 0)
|
|
871
|
+
if total_issues == 0:
|
|
872
|
+
lines.append("🟢 RUNTIME STATUS: HEALTHY - No issues detected")
|
|
873
|
+
elif total_issues <= 2:
|
|
874
|
+
lines.append("🟡 RUNTIME STATUS: WARNING - Minor issues detected")
|
|
875
|
+
else:
|
|
876
|
+
lines.append("🔴 RUNTIME STATUS: CRITICAL - Multiple issues detected")
|
|
877
|
+
|
|
878
|
+
lines.append("")
|
|
879
|
+
|
|
880
|
+
# Issues section
|
|
881
|
+
if issues:
|
|
882
|
+
lines.append("⚠️ ISSUES DETECTED:")
|
|
883
|
+
lines.append("-" * 40)
|
|
884
|
+
for i, issue in enumerate(issues, 1):
|
|
885
|
+
lines.append(f" {i}. {issue}")
|
|
886
|
+
lines.append("")
|
|
887
|
+
|
|
888
|
+
lines.append("💡 SUGGESTED ACTIONS:")
|
|
889
|
+
lines.append("-" * 40)
|
|
890
|
+
for i, suggestion in enumerate(suggestions, 1):
|
|
891
|
+
lines.append(f" {i}. {suggestion}")
|
|
892
|
+
else:
|
|
893
|
+
lines.append("✅ No runtime issues detected!")
|
|
894
|
+
|
|
895
|
+
lines.append("")
|
|
896
|
+
|
|
897
|
+
# Statistics
|
|
898
|
+
lines.append("📊 RUNTIME STATISTICS:")
|
|
899
|
+
lines.append("-" * 40)
|
|
900
|
+
call_summary = self.get_call_chain_summary()
|
|
901
|
+
lines.append(f" Total Instances: {call_summary.get('total_instances', 0)}")
|
|
902
|
+
lines.append(f" Agents: {call_summary.get('agents', 0)}")
|
|
903
|
+
lines.append(f" Blocks: {call_summary.get('blocks', 0)}")
|
|
904
|
+
lines.append(f" Progresses: {call_summary.get('progresses', 0)}")
|
|
905
|
+
lines.append(f" Stages: {call_summary.get('stages', 0)}")
|
|
906
|
+
|
|
907
|
+
if stats:
|
|
908
|
+
lines.append(f" Incomplete Stages: {stats.get('incomplete_stages', 0)}")
|
|
909
|
+
lines.append(
|
|
910
|
+
f" Blocks without Progress: {stats.get('blocks_without_progress', 0)}"
|
|
911
|
+
)
|
|
912
|
+
lines.append(
|
|
913
|
+
f" LLM Stages with 0 tokens: {stats.get('llm_stages_zero_tokens', 0)}"
|
|
914
|
+
)
|
|
915
|
+
|
|
916
|
+
# Agent names for reference
|
|
917
|
+
agent_names = call_summary.get("agent_names", [])
|
|
918
|
+
if agent_names:
|
|
919
|
+
lines.append(f" Active Agents: {', '.join(agent_names)}")
|
|
920
|
+
|
|
921
|
+
lines.append(f"{'=' * 80}")
|
|
922
|
+
|
|
923
|
+
# Print the report
|
|
924
|
+
report = "\n".join(lines)
|
|
925
|
+
console(report)
|
|
926
|
+
return report
|