kweaver-dolphin 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- DolphinLanguageSDK/__init__.py +58 -0
- dolphin/__init__.py +62 -0
- dolphin/cli/__init__.py +20 -0
- dolphin/cli/args/__init__.py +9 -0
- dolphin/cli/args/parser.py +567 -0
- dolphin/cli/builtin_agents/__init__.py +22 -0
- dolphin/cli/commands/__init__.py +4 -0
- dolphin/cli/interrupt/__init__.py +8 -0
- dolphin/cli/interrupt/handler.py +205 -0
- dolphin/cli/interrupt/keyboard.py +82 -0
- dolphin/cli/main.py +49 -0
- dolphin/cli/multimodal/__init__.py +34 -0
- dolphin/cli/multimodal/clipboard.py +327 -0
- dolphin/cli/multimodal/handler.py +249 -0
- dolphin/cli/multimodal/image_processor.py +214 -0
- dolphin/cli/multimodal/input_parser.py +149 -0
- dolphin/cli/runner/__init__.py +8 -0
- dolphin/cli/runner/runner.py +989 -0
- dolphin/cli/ui/__init__.py +10 -0
- dolphin/cli/ui/console.py +2795 -0
- dolphin/cli/ui/input.py +340 -0
- dolphin/cli/ui/layout.py +425 -0
- dolphin/cli/ui/stream_renderer.py +302 -0
- dolphin/cli/utils/__init__.py +8 -0
- dolphin/cli/utils/helpers.py +135 -0
- dolphin/cli/utils/version.py +49 -0
- dolphin/core/__init__.py +107 -0
- dolphin/core/agent/__init__.py +10 -0
- dolphin/core/agent/agent_state.py +69 -0
- dolphin/core/agent/base_agent.py +970 -0
- dolphin/core/code_block/__init__.py +0 -0
- dolphin/core/code_block/agent_init_block.py +0 -0
- dolphin/core/code_block/assign_block.py +98 -0
- dolphin/core/code_block/basic_code_block.py +1865 -0
- dolphin/core/code_block/explore_block.py +1327 -0
- dolphin/core/code_block/explore_block_v2.py +712 -0
- dolphin/core/code_block/explore_strategy.py +672 -0
- dolphin/core/code_block/judge_block.py +220 -0
- dolphin/core/code_block/prompt_block.py +32 -0
- dolphin/core/code_block/skill_call_deduplicator.py +291 -0
- dolphin/core/code_block/tool_block.py +129 -0
- dolphin/core/common/__init__.py +17 -0
- dolphin/core/common/constants.py +176 -0
- dolphin/core/common/enums.py +1173 -0
- dolphin/core/common/exceptions.py +133 -0
- dolphin/core/common/multimodal.py +539 -0
- dolphin/core/common/object_type.py +165 -0
- dolphin/core/common/output_format.py +432 -0
- dolphin/core/common/types.py +36 -0
- dolphin/core/config/__init__.py +16 -0
- dolphin/core/config/global_config.py +1289 -0
- dolphin/core/config/ontology_config.py +133 -0
- dolphin/core/context/__init__.py +12 -0
- dolphin/core/context/context.py +1580 -0
- dolphin/core/context/context_manager.py +161 -0
- dolphin/core/context/var_output.py +82 -0
- dolphin/core/context/variable_pool.py +356 -0
- dolphin/core/context_engineer/__init__.py +41 -0
- dolphin/core/context_engineer/config/__init__.py +5 -0
- dolphin/core/context_engineer/config/settings.py +402 -0
- dolphin/core/context_engineer/core/__init__.py +7 -0
- dolphin/core/context_engineer/core/budget_manager.py +327 -0
- dolphin/core/context_engineer/core/context_assembler.py +583 -0
- dolphin/core/context_engineer/core/context_manager.py +637 -0
- dolphin/core/context_engineer/core/tokenizer_service.py +260 -0
- dolphin/core/context_engineer/example/incremental_example.py +267 -0
- dolphin/core/context_engineer/example/traditional_example.py +334 -0
- dolphin/core/context_engineer/services/__init__.py +5 -0
- dolphin/core/context_engineer/services/compressor.py +399 -0
- dolphin/core/context_engineer/utils/__init__.py +6 -0
- dolphin/core/context_engineer/utils/context_utils.py +441 -0
- dolphin/core/context_engineer/utils/message_formatter.py +270 -0
- dolphin/core/context_engineer/utils/token_utils.py +139 -0
- dolphin/core/coroutine/__init__.py +15 -0
- dolphin/core/coroutine/context_snapshot.py +154 -0
- dolphin/core/coroutine/context_snapshot_profile.py +922 -0
- dolphin/core/coroutine/context_snapshot_store.py +268 -0
- dolphin/core/coroutine/execution_frame.py +145 -0
- dolphin/core/coroutine/execution_state_registry.py +161 -0
- dolphin/core/coroutine/resume_handle.py +101 -0
- dolphin/core/coroutine/step_result.py +101 -0
- dolphin/core/executor/__init__.py +18 -0
- dolphin/core/executor/debug_controller.py +630 -0
- dolphin/core/executor/dolphin_executor.py +1063 -0
- dolphin/core/executor/executor.py +624 -0
- dolphin/core/flags/__init__.py +27 -0
- dolphin/core/flags/definitions.py +49 -0
- dolphin/core/flags/manager.py +113 -0
- dolphin/core/hook/__init__.py +95 -0
- dolphin/core/hook/expression_evaluator.py +499 -0
- dolphin/core/hook/hook_dispatcher.py +380 -0
- dolphin/core/hook/hook_types.py +248 -0
- dolphin/core/hook/isolated_variable_pool.py +284 -0
- dolphin/core/interfaces.py +53 -0
- dolphin/core/llm/__init__.py +0 -0
- dolphin/core/llm/llm.py +495 -0
- dolphin/core/llm/llm_call.py +100 -0
- dolphin/core/llm/llm_client.py +1285 -0
- dolphin/core/llm/message_sanitizer.py +120 -0
- dolphin/core/logging/__init__.py +20 -0
- dolphin/core/logging/logger.py +526 -0
- dolphin/core/message/__init__.py +8 -0
- dolphin/core/message/compressor.py +749 -0
- dolphin/core/parser/__init__.py +8 -0
- dolphin/core/parser/parser.py +405 -0
- dolphin/core/runtime/__init__.py +10 -0
- dolphin/core/runtime/runtime_graph.py +926 -0
- dolphin/core/runtime/runtime_instance.py +446 -0
- dolphin/core/skill/__init__.py +14 -0
- dolphin/core/skill/context_retention.py +157 -0
- dolphin/core/skill/skill_function.py +686 -0
- dolphin/core/skill/skill_matcher.py +282 -0
- dolphin/core/skill/skillkit.py +700 -0
- dolphin/core/skill/skillset.py +72 -0
- dolphin/core/trajectory/__init__.py +10 -0
- dolphin/core/trajectory/recorder.py +189 -0
- dolphin/core/trajectory/trajectory.py +522 -0
- dolphin/core/utils/__init__.py +9 -0
- dolphin/core/utils/cache_kv.py +212 -0
- dolphin/core/utils/tools.py +340 -0
- dolphin/lib/__init__.py +93 -0
- dolphin/lib/debug/__init__.py +8 -0
- dolphin/lib/debug/visualizer.py +409 -0
- dolphin/lib/memory/__init__.py +28 -0
- dolphin/lib/memory/async_processor.py +220 -0
- dolphin/lib/memory/llm_calls.py +195 -0
- dolphin/lib/memory/manager.py +78 -0
- dolphin/lib/memory/sandbox.py +46 -0
- dolphin/lib/memory/storage.py +245 -0
- dolphin/lib/memory/utils.py +51 -0
- dolphin/lib/ontology/__init__.py +12 -0
- dolphin/lib/ontology/basic/__init__.py +0 -0
- dolphin/lib/ontology/basic/base.py +102 -0
- dolphin/lib/ontology/basic/concept.py +130 -0
- dolphin/lib/ontology/basic/object.py +11 -0
- dolphin/lib/ontology/basic/relation.py +63 -0
- dolphin/lib/ontology/datasource/__init__.py +27 -0
- dolphin/lib/ontology/datasource/datasource.py +66 -0
- dolphin/lib/ontology/datasource/oracle_datasource.py +338 -0
- dolphin/lib/ontology/datasource/sql.py +845 -0
- dolphin/lib/ontology/mapping.py +177 -0
- dolphin/lib/ontology/ontology.py +733 -0
- dolphin/lib/ontology/ontology_context.py +16 -0
- dolphin/lib/ontology/ontology_manager.py +107 -0
- dolphin/lib/skill_results/__init__.py +31 -0
- dolphin/lib/skill_results/cache_backend.py +559 -0
- dolphin/lib/skill_results/result_processor.py +181 -0
- dolphin/lib/skill_results/result_reference.py +179 -0
- dolphin/lib/skill_results/skillkit_hook.py +324 -0
- dolphin/lib/skill_results/strategies.py +328 -0
- dolphin/lib/skill_results/strategy_registry.py +150 -0
- dolphin/lib/skillkits/__init__.py +44 -0
- dolphin/lib/skillkits/agent_skillkit.py +155 -0
- dolphin/lib/skillkits/cognitive_skillkit.py +82 -0
- dolphin/lib/skillkits/env_skillkit.py +250 -0
- dolphin/lib/skillkits/mcp_adapter.py +616 -0
- dolphin/lib/skillkits/mcp_skillkit.py +771 -0
- dolphin/lib/skillkits/memory_skillkit.py +650 -0
- dolphin/lib/skillkits/noop_skillkit.py +31 -0
- dolphin/lib/skillkits/ontology_skillkit.py +89 -0
- dolphin/lib/skillkits/plan_act_skillkit.py +452 -0
- dolphin/lib/skillkits/resource/__init__.py +52 -0
- dolphin/lib/skillkits/resource/models/__init__.py +6 -0
- dolphin/lib/skillkits/resource/models/skill_config.py +109 -0
- dolphin/lib/skillkits/resource/models/skill_meta.py +127 -0
- dolphin/lib/skillkits/resource/resource_skillkit.py +393 -0
- dolphin/lib/skillkits/resource/skill_cache.py +215 -0
- dolphin/lib/skillkits/resource/skill_loader.py +395 -0
- dolphin/lib/skillkits/resource/skill_validator.py +406 -0
- dolphin/lib/skillkits/resource_skillkit.py +11 -0
- dolphin/lib/skillkits/search_skillkit.py +163 -0
- dolphin/lib/skillkits/sql_skillkit.py +274 -0
- dolphin/lib/skillkits/system_skillkit.py +509 -0
- dolphin/lib/skillkits/vm_skillkit.py +65 -0
- dolphin/lib/utils/__init__.py +9 -0
- dolphin/lib/utils/data_process.py +207 -0
- dolphin/lib/utils/handle_progress.py +178 -0
- dolphin/lib/utils/security.py +139 -0
- dolphin/lib/utils/text_retrieval.py +462 -0
- dolphin/lib/vm/__init__.py +11 -0
- dolphin/lib/vm/env_executor.py +895 -0
- dolphin/lib/vm/python_session_manager.py +453 -0
- dolphin/lib/vm/vm.py +610 -0
- dolphin/sdk/__init__.py +60 -0
- dolphin/sdk/agent/__init__.py +12 -0
- dolphin/sdk/agent/agent_factory.py +236 -0
- dolphin/sdk/agent/dolphin_agent.py +1106 -0
- dolphin/sdk/api/__init__.py +4 -0
- dolphin/sdk/runtime/__init__.py +8 -0
- dolphin/sdk/runtime/env.py +363 -0
- dolphin/sdk/skill/__init__.py +10 -0
- dolphin/sdk/skill/global_skills.py +706 -0
- dolphin/sdk/skill/traditional_toolkit.py +260 -0
- kweaver_dolphin-0.1.0.dist-info/METADATA +521 -0
- kweaver_dolphin-0.1.0.dist-info/RECORD +199 -0
- kweaver_dolphin-0.1.0.dist-info/WHEEL +5 -0
- kweaver_dolphin-0.1.0.dist-info/entry_points.txt +27 -0
- kweaver_dolphin-0.1.0.dist-info/licenses/LICENSE.txt +201 -0
- kweaver_dolphin-0.1.0.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,409 @@
|
|
|
1
|
+
from rich.console import Console
|
|
2
|
+
from rich.theme import Theme
|
|
3
|
+
from rich.tree import Tree
|
|
4
|
+
from rich.table import Table
|
|
5
|
+
from rich.panel import Panel
|
|
6
|
+
from rich.layout import Layout
|
|
7
|
+
from rich.text import Text
|
|
8
|
+
from rich.markdown import Markdown
|
|
9
|
+
from rich.columns import Columns
|
|
10
|
+
from rich.style import Style
|
|
11
|
+
from rich import box
|
|
12
|
+
from typing import Dict, Any, List
|
|
13
|
+
|
|
14
|
+
# Define a modern color theme
|
|
15
|
+
CUSTOM_THEME = Theme({
|
|
16
|
+
"info": "dim cyan",
|
|
17
|
+
"warning": "magenta",
|
|
18
|
+
"danger": "bold red",
|
|
19
|
+
"success": "bold green",
|
|
20
|
+
"header": "bold white on blue",
|
|
21
|
+
"key": "bold cyan",
|
|
22
|
+
"value": "white",
|
|
23
|
+
"dim": "dim white",
|
|
24
|
+
"highlight": "bold yellow",
|
|
25
|
+
"llm_stage": "bold magenta",
|
|
26
|
+
"skill_stage": "bold green",
|
|
27
|
+
"block_type": "bold blue",
|
|
28
|
+
"agent_name": "bold yellow",
|
|
29
|
+
})
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class TraceVisualizer:
    """Render a Dolphin execution trace to the terminal using Rich.

    Consumes a pre-computed trace dictionary (call chain, LLM/skill
    interactions, summaries, context info) and prints a sequence of styled
    panels and tables. The class is output-only: it never mutates the trace
    data it is given.
    """

    def __init__(self, console: Console = None, mode: str = "brief"):
        """Initialize TraceVisualizer.

        Args:
            console: Rich Console instance (optional). When omitted, a new
                Console themed with CUSTOM_THEME is created.
            mode: Display mode - "brief" for compact view, "full" for detailed view
        """
        self.console = console or Console(theme=CUSTOM_THEME)
        self.mode = mode  # "brief" or "full"

    def display_trace(self, trace_data: Dict[str, Any]):
        """Clear the screen and render every section of the trace in order.

        Requires `trace_data` to contain the keys "context_information",
        "call_chain", "llm_interactions", "llm_summary", "skill_interactions",
        "skill_summary" and "execution_summary" (a missing one raises
        KeyError); only "title" is optional.
        """
        self.console.clear()
        self._render_header(trace_data.get("title", "Dolphin Execution Trace"),
                            trace_data["context_information"].get("session_id", "N/A"),
                            trace_data["context_information"].get("user_id", "N/A"))
        self._render_call_chain(trace_data["call_chain"])
        self._render_llm_details(trace_data["llm_interactions"], trace_data["llm_summary"])
        self._render_skill_details(trace_data["skill_interactions"], trace_data["skill_summary"])
        self._render_execution_summary(trace_data["execution_summary"])
        self._render_context_information(trace_data["context_information"])
        self.console.print(Text("\n✨ End of Debug Session.", justify="center", style="dim"))

    def _render_header(self, title: str, session_id: str, user_id: str):
        """Print the top banner: upper-cased title on the left, session/user
        metadata right-aligned, inside a heavy blue panel."""
        grid = Table.grid(expand=True)
        grid.add_column(justify="center", ratio=1)
        grid.add_column(justify="right")

        header_title = Text(f" 🐞 {title.upper()} ", style="header")
        meta = Text(f"Session: {session_id} | User: {user_id} ", style="dim")

        grid.add_row(header_title, meta)
        self.console.print(Panel(grid, style="blue", box=box.HEAVY, padding=(0,0)))
        self.console.print()

    def _render_call_chain(self, call_chain_nodes: List[Dict[str, Any]]):
        """Render the hierarchical call chain as a Rich tree inside a panel.

        Args:
            call_chain_nodes: top-level node dicts; nesting comes from each
                node's "children" list (see _build_rich_tree).
        """
        self.console.print(Text(" 📊 Call Chain Overview", style="bold underline"))

        tree = Tree(f"[bold blue]Execution Root[/]", hide_root=True)  # Use a hidden root
        self._build_rich_tree(tree, call_chain_nodes)

        self.console.print(Panel(tree, border_style="dim", title="Execution Path", title_align="left"))
        self.console.print()

    def _build_rich_tree(self, parent_tree: Tree, nodes: List[Dict[str, Any]]):
        """Build rich tree nodes from structured call_chain data.

        Recurses through each node's "children" list. Node dicts may carry:
        "type" (agent/block/progress/stage), "id", "name", "status",
        "duration", plus type-specific fields ("stage_count" for progress;
        "stage_type", "skill_name", "is_llm_stage" and token estimates for
        stage nodes).
        """
        for node in nodes or []:
            node_type = str(node.get("type", "")).lower()
            node_id = node.get("id", "?")
            node_name = node.get("name") or node_id
            node_status = node.get("status")
            node_duration = node.get("duration") or 0.0

            icon_map = {
                "agent": "🤖",
                "block": "📦",
                "progress": "⚡",
                "stage": "🔄",
            }
            icon = icon_map.get(node_type, "❓")  # "❓" marks an unrecognized node type

            # Base tags: Type + Name
            label = Text(f"{icon} {node_type.upper()} [{node_name}]", style="block_type")

            # Add information of a specific type, aligning as closely as possible with the amount of information in legacy print_profile
            if node_type == "progress":
                stage_count = node.get("stage_count")
                if stage_count is not None:
                    label.append(f" ({stage_count} stages)", style="dim")
            elif node_type == "stage":
                stage_type = node.get("stage_type", "unknown")
                skill_name = node.get("skill_name")
                is_llm = bool(node.get("is_llm_stage", False))

                # Stage type and skill name
                label.append(f"/{stage_type}", style="dim")
                if skill_name:
                    label.append(f"/{skill_name}", style="skill_stage")

                # Show token estimates for LLM stage, consistent with legacy
                if is_llm:
                    est_in = node.get("estimated_input_tokens")
                    est_out = node.get("estimated_output_tokens")
                    if est_in is not None:
                        label.append(f" - estimated_input[{est_in}]", style="llm_stage")
                    if est_out is not None:
                        label.append(f" - estimated_output[{est_out}]", style="llm_stage")

            # Status + Duration (Only Stage has status, other nodes do not display UNKNOWN)
            if node_type == "stage" and node_status is not None:
                status_str = str(node_status).lower()
                status_style = "success" if status_str == "completed" else "warning"
                status_label = status_str.upper()
                status_text = f"[{status_style}]● {status_label}[/]"
                label.append(" ")
                # NOTE(review): Text.append renders its argument literally, so
                # the "[success]...[/]" markup in status_text likely shows as
                # raw brackets rather than styled text — confirm intended.
                label.append(status_text + " ", style="")
                label.append(f"({node_duration:.2f}s)", style="dim")

            child_tree = parent_tree.add(label)

            children = node.get("children") or []
            if children:
                self._build_rich_tree(child_tree, children)

    def _render_llm_details(self, interactions: List[Dict[str, Any]], summary: Dict[str, Any]):
        """Render LLM interaction details section with message table.

        Prints an overview table of all LLM stages, then (in "full" mode or
        when there are at most 2 stages) a per-stage message breakdown, and
        finally an aggregate summary panel. No-op when `interactions` is empty.
        """
        if not interactions:
            return

        self.console.print(Text("\n 🤖 LLM Interaction Details", style="bold underline"))

        # Overview table
        table = Table(show_header=True, header_style="bold magenta")
        table.add_column("ID", style="dim", width=8)
        table.add_column("Duration", justify="right")
        table.add_column("Status", justify="center")
        table.add_column("In Tokens", justify="right")
        table.add_column("Out Tokens", justify="right")

        for stage in interactions:
            status_raw = str(stage.get("status", "unknown"))
            status_style = "success" if status_raw.lower() == "completed" else "warning"
            status_text = f"[{status_style}]{status_raw.upper()}[/]"

            table.add_row(
                str(stage.get("id", ""))[:8],  # truncate to match the 8-char column
                f"{stage.get('duration', 0.0):.2f}s",
                status_text,
                str(stage.get("input_tokens", 0)),
                str(stage.get("output_tokens", 0)),
            )

        self.console.print(Panel(table, border_style="dim", title="LLM Stages", title_align="left"))
        self.console.print()

        # Detailed message table for each stage (in full mode or when few stages)
        if self.mode == "full" or len(interactions) <= 2:
            for stage in interactions:
                self._render_stage_messages(stage)

        # LLM Summary
        if summary:
            summary_table = Table(show_header=False, box=box.SIMPLE)
            summary_table.add_row("Total Stages", str(summary.get("total_stages", 0)))
            summary_table.add_row("Total LLM Time (s)", f"{summary.get('total_llm_time', 0.0):.2f}")
            summary_table.add_row("Total Input Tokens", str(summary.get("total_input_tokens", 0)))
            summary_table.add_row("Total Output Tokens", str(summary.get("total_output_tokens", 0)))
            summary_table.add_row("Total Tokens", str(summary.get("total_tokens", 0)))
            summary_table.add_row(
                "Avg Tokens/sec",
                f"{summary.get('avg_tokens_per_sec', 0.0):.2f}",
            )
            self.console.print(Panel(summary_table, border_style="dim", title="LLM Summary", title_align="left"))
            self.console.print()

    def _render_stage_messages(self, stage: Dict[str, Any]):
        """Render detailed message table for a single LLM stage.

        Shows each input message's role, its share of the total character
        count, and a content preview (truncated to 80 chars in "brief" mode,
        complete in "full" mode). Tool calls are appended as "🔧 name()"
        markers. In "full" mode the answer/think fields are also printed.
        No-op when the stage has no input messages.
        """
        stage_id = str(stage.get("id", ""))[:8]
        input_messages = stage.get("input_messages", [])

        if not input_messages:
            return

        self.console.print(Text(f"\n 📝 Messages for Stage {stage_id}", style="bold"))

        # Message table with row dividers
        msg_table = Table(show_header=True, header_style="bold cyan", box=box.ROUNDED, show_lines=True)
        msg_table.add_column("Role", style="yellow", width=10)
        msg_table.add_column("Size %", justify="right", width=8)
        msg_table.add_column("Content Preview", overflow="fold")

        # Character count across all messages; used to compute each row's "Size %".
        total_length = sum(len(m.get('content', '') or '') for m in input_messages)

        for msg in input_messages:
            role = msg.get('role', 'unknown')
            content = msg.get('content', '') or ''
            msg_ratio = (len(content) / total_length * 100) if total_length > 0 else 0.0

            # Truncate content for display (only in brief mode)
            if self.mode == "full":
                # Full mode: show complete content
                content_preview = content.replace('\n', '\\n')
            else:
                # Brief mode: truncate to 80 chars
                max_len = 80
                content_preview = content.replace('\n', '\\n')[:max_len]
                if len(content) > max_len:
                    content_preview += "..."

            # Handle tool calls
            tool_calls = msg.get('tool_calls', [])
            if tool_calls:
                tool_info = []
                for tc in tool_calls:
                    func = tc.get('function', {})
                    tool_name = func.get('name', 'unknown')
                    tool_info.append(f"🔧 {tool_name}()")
                if content_preview:
                    content_preview = content_preview + " | " + ", ".join(tool_info)
                else:
                    content_preview = ", ".join(tool_info)

            # Use Text object to avoid Rich markup parsing issues with brackets like [/path/...]
            msg_table.add_row(role, f"{msg_ratio:.1f}%", Text(content_preview))

        self.console.print(msg_table)

        # Answer and Think in full mode
        if self.mode == "full":
            answer = stage.get('answer')
            think = stage.get('think')

            if answer:
                answer_text = Text()
                answer_text.append("🎯 Answer: ", style="bold green")
                # Full mode: show complete answer
                answer_text.append(answer)
                self.console.print(Panel(answer_text, border_style="green", title="Response", title_align="left"))

            if think:
                think_text = Text()
                think_text.append("💭 Think: ", style="bold cyan")
                # Full mode: show complete think
                think_text.append(think)
                self.console.print(Panel(think_text, border_style="cyan", title="Reasoning", title_align="left"))

    def _render_skill_details(self, interactions: List[Dict[str, Any]], summary: Dict[str, Any]):
        """Render skill interaction details section.

        Prints a per-skill aggregate table (from summary["details_by_skill"])
        followed by a per-stage table of individual calls. No-op when both
        `interactions` and `summary` are empty/falsy.
        """
        if not interactions and not summary:
            return

        self.console.print(Text("\n 🛠️ Skill Interaction Summary", style="bold underline"))

        # Summary by skill name
        details_by_skill = (summary or {}).get("details_by_skill", {}) if summary else {}
        if details_by_skill:
            table = Table(show_header=True, header_style="bold green")
            table.add_column("Skill Name")
            table.add_column("Calls", justify="right")
            table.add_column("Total Time (s)", justify="right")

            for skill_name, stats in details_by_skill.items():
                table.add_row(
                    str(skill_name),
                    str(stats.get("count", 0)),
                    f"{stats.get('total_time', 0.0):.2f}",
                )

            self.console.print(Panel(table, border_style="dim", title="By Skill", title_align="left"))
            self.console.print()

        # Optional: list individual interactions if needed
        if interactions:
            table = Table(show_header=True, header_style="bold green")
            table.add_column("ID", style="dim", width=8)
            table.add_column("Skill", justify="left")
            table.add_column("Duration", justify="right")
            table.add_column("Status", justify="center")

            for stage in interactions:
                status_raw = str(stage.get("status", "unknown"))
                status_style = "success" if status_raw.lower() == "completed" else "warning"
                status_text = f"[{status_style}]{status_raw.upper()}[/]"
                table.add_row(
                    str(stage.get("id", ""))[:8],
                    str(stage.get("name", "Unknown")),
                    f"{stage.get('duration', 0.0):.2f}s",
                    status_text,
                )

            self.console.print(Panel(table, border_style="dim", title="Skill Stages", title_align="left"))
            self.console.print()

    def _render_execution_summary(self, execution_summary: Dict[str, Any]):
        """Render overall execution summary (stage count and total wall time).
        No-op when `execution_summary` is empty/falsy."""
        if not execution_summary:
            return

        table = Table(show_header=False, box=box.SIMPLE)
        table.add_row("Total Stages", str(execution_summary.get("total_stages", 0)))
        table.add_row(
            "Total Execution Time (s)",
            f"{execution_summary.get('total_execution_time', 0.0):.2f}",
        )

        self.console.print(Text("\n 🎯 Execution Summary", style="bold underline"))
        self.console.print(Panel(table, border_style="dim", title="Execution", title_align="left"))
        self.console.print()

    def _render_context_information(self, context_info: Dict[str, Any]):
        """Render context information section.

        Shows user/session ids, memory configuration (when present) and the
        variable count; prints a placeholder panel when `context_info` is
        empty.
        """
        self.console.print(Text("\n 🔧 Context Information", style="bold underline"))

        if not context_info:
            self.console.print(Panel(Text("No context information available", style="dim"), border_style="dim"))
            self.console.print()
            return

        table = Table(show_header=False, box=box.SIMPLE)
        table.add_row("User ID", str(context_info.get("user_id", "N/A")))
        table.add_row("Session ID", str(context_info.get("session_id", "N/A")))

        if "memory_enabled" in context_info:
            table.add_row("Memory Enabled", str(context_info.get("memory_enabled")))
            if context_info.get("memory_enabled"):
                table.add_row(
                    "Max Knowledge Points",
                    str(context_info.get("max_knowledge_points", "N/A")),
                )

        table.add_row("Variables Count", str(context_info.get("variables_count", 0)))

        self.console.print(Panel(table, border_style="dim", title="Context", title_align="left"))
        self.console.print()

    def display_progress(self, stages: List[Dict[str, Any]]):
        """Display execution progress in a table.

        One row per stage dict: index, agent, stage name, colored status,
        duration derived from start_time/end_time, and 30-char previews of
        the input message and the answer (falling back to "block_answer").
        """
        self.console.print(Text("\n 🔧 Execution Progress", style="bold underline"))

        if not stages:
            self.console.print("📭 No progress information available.")
            return

        table = Table(show_header=True, header_style="bold cyan", box=box.ROUNDED)
        table.add_column("ID", style="dim", width=4)
        table.add_column("Agent", style="yellow")
        table.add_column("Stage", style="magenta")
        table.add_column("Status", justify="center")
        table.add_column("Dur(s)", justify="right", width=8)
        table.add_column("Input (Preview)", width=30)
        table.add_column("Answer (Preview)", width=30)

        for i, stage in enumerate(stages):
            # Status coloring
            status = str(stage.get('status', 'unknown'))
            if status == 'completed':
                status_style = "green"
            elif status == 'in_progress':
                status_style = "yellow"
            elif status == 'failed' or status == 'error':
                status_style = "red"
            else:
                status_style = "white"

            # Duration
            start = stage.get('start_time')
            end = stage.get('end_time')
            duration = "-"
            if start is not None and end is not None:
                try:
                    duration = f"{float(end) - float(start):.2f}"
                except (ValueError, TypeError):
                    # Non-numeric timestamps: leave the "-" placeholder.
                    pass

            # Truncate text
            input_msg = str(stage.get('input_message', ''))
            input_preview = (input_msg[:27] + "...") if len(input_msg) > 30 else input_msg

            answer = str(stage.get('answer', ''))
            # If answer is empty, check block_answer
            if not answer:
                answer = str(stage.get('block_answer', ''))

            answer_preview = (answer[:27] + "...") if len(answer) > 30 else answer

            table.add_row(
                str(i),
                Text(str(stage.get('agent_name', '-')), style="yellow"),
                Text(str(stage.get('stage', '-')), style="magenta"),
                f"[{status_style}]{status}[/]",
                duration,
                Text(input_preview, style="dim"),
                Text(answer_preview)
            )

        self.console.print(table)
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Memory Management Subsystem for Dolphin Language SDK
|
|
3
|
+
|
|
4
|
+
This module provides long-term memory capabilities for intelligent agents,
|
|
5
|
+
supporting both simple memory storage and complex knowledge management with user isolation.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from dolphin.core.common import KnowledgePoint, SingleMessage
|
|
9
|
+
from .storage import MemoryFileSys
|
|
10
|
+
from .manager import MemoryManager
|
|
11
|
+
from .llm_calls import LLMCall, KnowledgeExtractionCall, KnowledgeMergeCall
|
|
12
|
+
|
|
13
|
+
from dolphin.core.config.global_config import MemoryConfig
|
|
14
|
+
|
|
15
|
+
__all__ = [
|
|
16
|
+
"SingleMessage",
|
|
17
|
+
"KnowledgePoint",
|
|
18
|
+
# Storage interfaces
|
|
19
|
+
"MemoryStorage",
|
|
20
|
+
"MemoryFileSys",
|
|
21
|
+
# Unified management
|
|
22
|
+
"MemoryManager",
|
|
23
|
+
"MemoryConfig",
|
|
24
|
+
# LLM processing
|
|
25
|
+
"LLMCall",
|
|
26
|
+
"KnowledgeExtractionCall",
|
|
27
|
+
"KnowledgeMergeCall",
|
|
28
|
+
]
|
|
@@ -0,0 +1,220 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Async Task Processor - handles asynchronous knowledge extraction tasks.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import threading
|
|
6
|
+
import queue
|
|
7
|
+
import time
|
|
8
|
+
from typing import Optional, Dict, Any, Callable
|
|
9
|
+
from dolphin.core.logging.logger import get_logger
|
|
10
|
+
|
|
11
|
+
logger = get_logger("mem")
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class AsyncTaskProcessor:
|
|
15
|
+
"""Handles asynchronous task processing with a dedicated worker thread."""
|
|
16
|
+
|
|
17
|
+
def __init__(self, task_handler: Callable[[Dict[str, Any]], None]):
    """
    Initialize the async task processor.

    Side effect: immediately starts a daemon worker thread (via
    _start_worker_thread) that consumes the internal task queue.

    :param task_handler: Function to handle each task, should accept a dict parameter
    """
    self.task_handler = task_handler

    # Initialize async processing components
    self._task_queue = queue.Queue()          # pending task dicts; None is the shutdown wake-up sentinel
    self._shutdown_event = threading.Event()  # set once shutdown() has been requested
    self._worker_thread = None
    self._worker_lock = threading.Lock()      # NOTE(review): not acquired anywhere in the visible code — confirm intended use

    # Start worker thread
    self._start_worker_thread()
|
|
33
|
+
|
|
34
|
+
def __del__(self):
    """Cleanup method to ensure worker thread is properly shut down.

    NOTE(review): __del__ is not guaranteed to run (reference cycles,
    interpreter shutdown); callers should still invoke shutdown()
    explicitly rather than rely on garbage collection.
    """
    self.shutdown()
|
|
37
|
+
|
|
38
|
+
def shutdown(self):
    """
    Gracefully shutdown the async processor and its worker thread.

    Sets the shutdown flag, enqueues a ``None`` sentinel so a worker
    blocked on the queue wakes up, then waits up to 60 seconds for the
    thread to exit. Safe to call when the worker is already stopped
    (the whole body is skipped).
    """
    if self._worker_thread and self._worker_thread.is_alive():
        logger.info("Shutting down AsyncTaskProcessor worker thread...")

        # Signal shutdown and put None to wake up worker thread
        self._shutdown_event.set()
        self._task_queue.put(None)  # Wake up worker thread

        self._worker_thread.join(timeout=60)  # Wait up to 60 seconds
        if self._worker_thread.is_alive():
            logger.warning(
                "Worker thread did not shut down gracefully within timeout"
            )
        else:
            logger.info("Worker thread shut down successfully")
|
|
56
|
+
|
|
57
|
+
def submit_task(self, task: Dict[str, Any]):
    """
    Submit a task for asynchronous processing.

    Tasks submitted after shutdown() has been requested are silently
    dropped (a warning is logged, no exception is raised).

    :param task: Task parameters dictionary
    """
    if self._shutdown_event.is_set():
        logger.warning("Cannot submit task - processor is shutting down")
        return

    self._task_queue.put(task)
    logger.debug("Submitted task to async processor")
|
|
69
|
+
|
|
70
|
+
def wait_for_tasks_completion(self, timeout: Optional[float] = None) -> bool:
    """
    Wait for all pending tasks to complete.

    A task counts as pending until the worker calls ``task_done()`` on it,
    so this also covers the task currently being processed. (The previous
    implementation's timeout branch polled ``Queue.empty()``, which becomes
    True as soon as a task is *dequeued* — before it finishes — so it could
    report completion while a task was still running, inconsistent with the
    ``join()`` branch.)

    :param timeout: Maximum time to wait in seconds, None for indefinite wait
    :return: True if all tasks completed, False if timeout occurred
    """
    try:
        if timeout is None:
            self._task_queue.join()
            return True

        # Poll the same unfinished-task counter that join() blocks on,
        # against a monotonic deadline so wall-clock adjustments cannot
        # distort the timeout.
        deadline = time.monotonic() + timeout
        while self._task_queue.unfinished_tasks:
            if time.monotonic() > deadline:
                return False
            time.sleep(0.1)
        return True
    except Exception as e:
        logger.error(f"Error waiting for tasks completion: {e}")
        return False
def get_queue_status(self) -> Dict[str, Any]:
    """
    Snapshot the processor's current state.

    :return: Dictionary containing queue status information
    """
    worker = self._worker_thread
    status = {
        "queue_size": self._task_queue.qsize(),
        "worker_alive": worker.is_alive() if worker else False,
        "shutdown_requested": self._shutdown_event.is_set(),
    }
    return status
def _start_worker_thread(self):
    """Spawn the daemon thread that drains the task queue."""
    worker = threading.Thread(
        target=self._worker_loop,
        name="AsyncTaskProcessor-Worker",
        daemon=True,
    )
    self._worker_thread = worker
    worker.start()
    logger.info("Started AsyncTaskProcessor worker thread")
def _worker_loop(self):
    """
    Main loop for the worker thread.

    Pulls tasks off the queue until shutdown is requested (or the None
    sentinel arrives) and hands each one to ``_process_task``.
    """
    logger.info("AsyncTaskProcessor worker thread started")

    while not self._shutdown_event.is_set():
        try:
            # Wake up at least once a second so a shutdown request is
            # noticed even when no tasks arrive.
            task = self._task_queue.get(timeout=1.0)
        except queue.Empty:
            continue

        try:
            if task is None:  # Shutdown sentinel injected by shutdown()
                break

            logger.debug(f"Processing task: {task}")
            # Serialize task execution so handlers never run concurrently.
            with self._worker_lock:
                self._process_task(task)
        except Exception as e:
            logger.error(f"Error in worker thread: {e}")
        finally:
            # Always balance get() with task_done(). The original skipped
            # this for the sentinel (and when processing raised), leaving
            # unfinished_tasks inflated so Queue.join() would block forever.
            self._task_queue.task_done()

    logger.info("AsyncTaskProcessor worker thread stopped")
def _process_task(self, task: Dict[str, Any]):
    """
    Run the user-supplied handler on one task.

    :param task: Task parameters dictionary
    """
    try:
        self.task_handler(task)
    except Exception as e:
        # A failing handler must not kill the worker thread; log and move on.
        logger.error(f"Failed to process task: {e}")
class AsyncKnowledgeExtractor:
    """Asynchronous front-end that offloads knowledge extraction to a worker."""

    def __init__(self, memory_manager):
        """
        Initialize the async knowledge extractor.

        :param memory_manager: Reference to the memory manager instance
        """
        self.memory_manager = memory_manager
        self.processor = AsyncTaskProcessor(self._handle_extraction_task)

    def submit_extraction_task(
        self, user_id: str, messages, context, auto_merge: bool = True
    ):
        """
        Queue a knowledge extraction job to run in the background.

        :param user_id: User ID for memory isolation
        :param messages: Conversation messages to extract knowledge from
        :param context: Context instance needed for LLMClient initialization
        :param auto_merge: Whether to automatically merge knowledge after extraction
        """
        # Snapshot the context when it supports copying, so threading locks
        # held by the live object never cross the thread boundary.
        snapshot = context.copy() if hasattr(context, "copy") else context

        self.processor.submit_task(
            {
                "user_id": user_id,
                "messages": messages,
                "context": snapshot,
                "auto_merge": auto_merge,
            }
        )
        logger.info(f"Submitted async knowledge extraction task for user {user_id}")

    def shutdown(self):
        """Shut down the underlying task processor."""
        self.processor.shutdown()

    def wait_for_tasks_completion(self, timeout: Optional[float] = None) -> bool:
        """Block until every queued extraction finishes (or timeout elapses)."""
        return self.processor.wait_for_tasks_completion(timeout)

    def get_queue_status(self) -> Dict[str, Any]:
        """Expose the underlying processor's queue status."""
        return self.processor.get_queue_status()

    def _handle_extraction_task(self, task: Dict[str, Any]):
        """
        Unpack one queued task and run the memory manager's extractor.

        :param task: Task parameters dictionary
        """
        user_id = task["user_id"]
        logger.info(f"Processing async knowledge extraction for user {user_id}")

        # Delegate the actual work to the memory manager.
        self.memory_manager._extract_knowledge_internal(
            user_id, task["messages"], task["context"], task["auto_merge"]
        )