kweaver_dolphin-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- DolphinLanguageSDK/__init__.py +58 -0
- dolphin/__init__.py +62 -0
- dolphin/cli/__init__.py +20 -0
- dolphin/cli/args/__init__.py +9 -0
- dolphin/cli/args/parser.py +567 -0
- dolphin/cli/builtin_agents/__init__.py +22 -0
- dolphin/cli/commands/__init__.py +4 -0
- dolphin/cli/interrupt/__init__.py +8 -0
- dolphin/cli/interrupt/handler.py +205 -0
- dolphin/cli/interrupt/keyboard.py +82 -0
- dolphin/cli/main.py +49 -0
- dolphin/cli/multimodal/__init__.py +34 -0
- dolphin/cli/multimodal/clipboard.py +327 -0
- dolphin/cli/multimodal/handler.py +249 -0
- dolphin/cli/multimodal/image_processor.py +214 -0
- dolphin/cli/multimodal/input_parser.py +149 -0
- dolphin/cli/runner/__init__.py +8 -0
- dolphin/cli/runner/runner.py +989 -0
- dolphin/cli/ui/__init__.py +10 -0
- dolphin/cli/ui/console.py +2795 -0
- dolphin/cli/ui/input.py +340 -0
- dolphin/cli/ui/layout.py +425 -0
- dolphin/cli/ui/stream_renderer.py +302 -0
- dolphin/cli/utils/__init__.py +8 -0
- dolphin/cli/utils/helpers.py +135 -0
- dolphin/cli/utils/version.py +49 -0
- dolphin/core/__init__.py +107 -0
- dolphin/core/agent/__init__.py +10 -0
- dolphin/core/agent/agent_state.py +69 -0
- dolphin/core/agent/base_agent.py +970 -0
- dolphin/core/code_block/__init__.py +0 -0
- dolphin/core/code_block/agent_init_block.py +0 -0
- dolphin/core/code_block/assign_block.py +98 -0
- dolphin/core/code_block/basic_code_block.py +1865 -0
- dolphin/core/code_block/explore_block.py +1327 -0
- dolphin/core/code_block/explore_block_v2.py +712 -0
- dolphin/core/code_block/explore_strategy.py +672 -0
- dolphin/core/code_block/judge_block.py +220 -0
- dolphin/core/code_block/prompt_block.py +32 -0
- dolphin/core/code_block/skill_call_deduplicator.py +291 -0
- dolphin/core/code_block/tool_block.py +129 -0
- dolphin/core/common/__init__.py +17 -0
- dolphin/core/common/constants.py +176 -0
- dolphin/core/common/enums.py +1173 -0
- dolphin/core/common/exceptions.py +133 -0
- dolphin/core/common/multimodal.py +539 -0
- dolphin/core/common/object_type.py +165 -0
- dolphin/core/common/output_format.py +432 -0
- dolphin/core/common/types.py +36 -0
- dolphin/core/config/__init__.py +16 -0
- dolphin/core/config/global_config.py +1289 -0
- dolphin/core/config/ontology_config.py +133 -0
- dolphin/core/context/__init__.py +12 -0
- dolphin/core/context/context.py +1580 -0
- dolphin/core/context/context_manager.py +161 -0
- dolphin/core/context/var_output.py +82 -0
- dolphin/core/context/variable_pool.py +356 -0
- dolphin/core/context_engineer/__init__.py +41 -0
- dolphin/core/context_engineer/config/__init__.py +5 -0
- dolphin/core/context_engineer/config/settings.py +402 -0
- dolphin/core/context_engineer/core/__init__.py +7 -0
- dolphin/core/context_engineer/core/budget_manager.py +327 -0
- dolphin/core/context_engineer/core/context_assembler.py +583 -0
- dolphin/core/context_engineer/core/context_manager.py +637 -0
- dolphin/core/context_engineer/core/tokenizer_service.py +260 -0
- dolphin/core/context_engineer/example/incremental_example.py +267 -0
- dolphin/core/context_engineer/example/traditional_example.py +334 -0
- dolphin/core/context_engineer/services/__init__.py +5 -0
- dolphin/core/context_engineer/services/compressor.py +399 -0
- dolphin/core/context_engineer/utils/__init__.py +6 -0
- dolphin/core/context_engineer/utils/context_utils.py +441 -0
- dolphin/core/context_engineer/utils/message_formatter.py +270 -0
- dolphin/core/context_engineer/utils/token_utils.py +139 -0
- dolphin/core/coroutine/__init__.py +15 -0
- dolphin/core/coroutine/context_snapshot.py +154 -0
- dolphin/core/coroutine/context_snapshot_profile.py +922 -0
- dolphin/core/coroutine/context_snapshot_store.py +268 -0
- dolphin/core/coroutine/execution_frame.py +145 -0
- dolphin/core/coroutine/execution_state_registry.py +161 -0
- dolphin/core/coroutine/resume_handle.py +101 -0
- dolphin/core/coroutine/step_result.py +101 -0
- dolphin/core/executor/__init__.py +18 -0
- dolphin/core/executor/debug_controller.py +630 -0
- dolphin/core/executor/dolphin_executor.py +1063 -0
- dolphin/core/executor/executor.py +624 -0
- dolphin/core/flags/__init__.py +27 -0
- dolphin/core/flags/definitions.py +49 -0
- dolphin/core/flags/manager.py +113 -0
- dolphin/core/hook/__init__.py +95 -0
- dolphin/core/hook/expression_evaluator.py +499 -0
- dolphin/core/hook/hook_dispatcher.py +380 -0
- dolphin/core/hook/hook_types.py +248 -0
- dolphin/core/hook/isolated_variable_pool.py +284 -0
- dolphin/core/interfaces.py +53 -0
- dolphin/core/llm/__init__.py +0 -0
- dolphin/core/llm/llm.py +495 -0
- dolphin/core/llm/llm_call.py +100 -0
- dolphin/core/llm/llm_client.py +1285 -0
- dolphin/core/llm/message_sanitizer.py +120 -0
- dolphin/core/logging/__init__.py +20 -0
- dolphin/core/logging/logger.py +526 -0
- dolphin/core/message/__init__.py +8 -0
- dolphin/core/message/compressor.py +749 -0
- dolphin/core/parser/__init__.py +8 -0
- dolphin/core/parser/parser.py +405 -0
- dolphin/core/runtime/__init__.py +10 -0
- dolphin/core/runtime/runtime_graph.py +926 -0
- dolphin/core/runtime/runtime_instance.py +446 -0
- dolphin/core/skill/__init__.py +14 -0
- dolphin/core/skill/context_retention.py +157 -0
- dolphin/core/skill/skill_function.py +686 -0
- dolphin/core/skill/skill_matcher.py +282 -0
- dolphin/core/skill/skillkit.py +700 -0
- dolphin/core/skill/skillset.py +72 -0
- dolphin/core/trajectory/__init__.py +10 -0
- dolphin/core/trajectory/recorder.py +189 -0
- dolphin/core/trajectory/trajectory.py +522 -0
- dolphin/core/utils/__init__.py +9 -0
- dolphin/core/utils/cache_kv.py +212 -0
- dolphin/core/utils/tools.py +340 -0
- dolphin/lib/__init__.py +93 -0
- dolphin/lib/debug/__init__.py +8 -0
- dolphin/lib/debug/visualizer.py +409 -0
- dolphin/lib/memory/__init__.py +28 -0
- dolphin/lib/memory/async_processor.py +220 -0
- dolphin/lib/memory/llm_calls.py +195 -0
- dolphin/lib/memory/manager.py +78 -0
- dolphin/lib/memory/sandbox.py +46 -0
- dolphin/lib/memory/storage.py +245 -0
- dolphin/lib/memory/utils.py +51 -0
- dolphin/lib/ontology/__init__.py +12 -0
- dolphin/lib/ontology/basic/__init__.py +0 -0
- dolphin/lib/ontology/basic/base.py +102 -0
- dolphin/lib/ontology/basic/concept.py +130 -0
- dolphin/lib/ontology/basic/object.py +11 -0
- dolphin/lib/ontology/basic/relation.py +63 -0
- dolphin/lib/ontology/datasource/__init__.py +27 -0
- dolphin/lib/ontology/datasource/datasource.py +66 -0
- dolphin/lib/ontology/datasource/oracle_datasource.py +338 -0
- dolphin/lib/ontology/datasource/sql.py +845 -0
- dolphin/lib/ontology/mapping.py +177 -0
- dolphin/lib/ontology/ontology.py +733 -0
- dolphin/lib/ontology/ontology_context.py +16 -0
- dolphin/lib/ontology/ontology_manager.py +107 -0
- dolphin/lib/skill_results/__init__.py +31 -0
- dolphin/lib/skill_results/cache_backend.py +559 -0
- dolphin/lib/skill_results/result_processor.py +181 -0
- dolphin/lib/skill_results/result_reference.py +179 -0
- dolphin/lib/skill_results/skillkit_hook.py +324 -0
- dolphin/lib/skill_results/strategies.py +328 -0
- dolphin/lib/skill_results/strategy_registry.py +150 -0
- dolphin/lib/skillkits/__init__.py +44 -0
- dolphin/lib/skillkits/agent_skillkit.py +155 -0
- dolphin/lib/skillkits/cognitive_skillkit.py +82 -0
- dolphin/lib/skillkits/env_skillkit.py +250 -0
- dolphin/lib/skillkits/mcp_adapter.py +616 -0
- dolphin/lib/skillkits/mcp_skillkit.py +771 -0
- dolphin/lib/skillkits/memory_skillkit.py +650 -0
- dolphin/lib/skillkits/noop_skillkit.py +31 -0
- dolphin/lib/skillkits/ontology_skillkit.py +89 -0
- dolphin/lib/skillkits/plan_act_skillkit.py +452 -0
- dolphin/lib/skillkits/resource/__init__.py +52 -0
- dolphin/lib/skillkits/resource/models/__init__.py +6 -0
- dolphin/lib/skillkits/resource/models/skill_config.py +109 -0
- dolphin/lib/skillkits/resource/models/skill_meta.py +127 -0
- dolphin/lib/skillkits/resource/resource_skillkit.py +393 -0
- dolphin/lib/skillkits/resource/skill_cache.py +215 -0
- dolphin/lib/skillkits/resource/skill_loader.py +395 -0
- dolphin/lib/skillkits/resource/skill_validator.py +406 -0
- dolphin/lib/skillkits/resource_skillkit.py +11 -0
- dolphin/lib/skillkits/search_skillkit.py +163 -0
- dolphin/lib/skillkits/sql_skillkit.py +274 -0
- dolphin/lib/skillkits/system_skillkit.py +509 -0
- dolphin/lib/skillkits/vm_skillkit.py +65 -0
- dolphin/lib/utils/__init__.py +9 -0
- dolphin/lib/utils/data_process.py +207 -0
- dolphin/lib/utils/handle_progress.py +178 -0
- dolphin/lib/utils/security.py +139 -0
- dolphin/lib/utils/text_retrieval.py +462 -0
- dolphin/lib/vm/__init__.py +11 -0
- dolphin/lib/vm/env_executor.py +895 -0
- dolphin/lib/vm/python_session_manager.py +453 -0
- dolphin/lib/vm/vm.py +610 -0
- dolphin/sdk/__init__.py +60 -0
- dolphin/sdk/agent/__init__.py +12 -0
- dolphin/sdk/agent/agent_factory.py +236 -0
- dolphin/sdk/agent/dolphin_agent.py +1106 -0
- dolphin/sdk/api/__init__.py +4 -0
- dolphin/sdk/runtime/__init__.py +8 -0
- dolphin/sdk/runtime/env.py +363 -0
- dolphin/sdk/skill/__init__.py +10 -0
- dolphin/sdk/skill/global_skills.py +706 -0
- dolphin/sdk/skill/traditional_toolkit.py +260 -0
- kweaver_dolphin-0.1.0.dist-info/METADATA +521 -0
- kweaver_dolphin-0.1.0.dist-info/RECORD +199 -0
- kweaver_dolphin-0.1.0.dist-info/WHEEL +5 -0
- kweaver_dolphin-0.1.0.dist-info/entry_points.txt +27 -0
- kweaver_dolphin-0.1.0.dist-info/licenses/LICENSE.txt +201 -0
- kweaver_dolphin-0.1.0.dist-info/top_level.txt +2 -0
dolphin/core/context_engineer/example/traditional_example.py
@@ -0,0 +1,334 @@
+"""ContextAssembler Traditional Full-Volume Processing Example
+
+Demonstrates how to use ContextAssembler for traditional full-volume processing,
+compared with incremental management methods, showcasing the workflow and characteristics of the traditional approach.
+"""
+
+import sys
+import os
+from tabulate import tabulate
+
+# Add project source path
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..", "src"))
+
+from dolphin.core.context_engineer.core.context_assembler import (
+    ContextAssembler,
+    AssembledContext,
+)
+from dolphin.core.context_engineer.core.budget_manager import (
+    BudgetManager,
+    BudgetAllocation,
+)
+from dolphin.core.context_engineer.config.settings import (
+    ContextConfig,
+)
+
+
+def get_context_config() -> ContextConfig:
+    """Get context configuration"""
+    return ContextConfig.from_dict(
+        {
+            "model": {
+                "name": "gpt-4",
+                "context_limit": 1000,
+                "output_target": 100,
+            },
+            "buckets": {
+                "system": {
+                    "name": "system",
+                    "min_tokens": 20,
+                    "max_tokens": 100,
+                    "weight": 2.0,
+                    "message_role": "system",
+                },
+                "task": {
+                    "name": "task",
+                    "min_tokens": 10,
+                    "max_tokens": 200,
+                    "weight": 2.0,
+                    "message_role": "user",
+                },
+                "history": {
+                    "name": "history",
+                    "min_tokens": 10,
+                    "max_tokens": 300,
+                    "weight": 1.0,
+                    "message_role": "user",
+                },
+                "tool_response": {
+                    "name": "tool_response",
+                    "min_tokens": 10,
+                    "max_tokens": 100,
+                    "weight": 1.5,
+                    "message_role": "tool",
+                },
+            },
+            "policies": {
+                "default": {
+                    "drop_order": [],
+                    "bucket_order": [
+                        "system",
+                        "task",
+                        "tool_response",
+                        "history",
+                    ],
+                }
+            },
+        }
+    )
+
+
+def print_assembled_context_stats(
+    assembled_context: AssembledContext, title: str
+) -> None:
+    """Print statistics of the assembled context"""
+    print(f"\n=== {title} ===")
+
+    # Overall statistics
+    overview_data = [
+        ["总token数", assembled_context.total_tokens],
+        ["部分数量", len(assembled_context.sections)],
+        ["丢弃部分", len(assembled_context.dropped_sections)],
+    ]
+    print(tabulate(overview_data, headers=["项目", "值"], tablefmt="grid"))
+
+    # Detailed statistics
+    if assembled_context.sections:
+        print("\n=== 各部分详细信息 ===")
+        headers = ["部分名称", "Token数", "分配Token", "优先级", "消息角色", "利用率"]
+        data = []
+        for section in assembled_context.sections:
+            utilization = (
+                section.token_count / section.allocated_tokens
+                if section.allocated_tokens > 0
+                else 0
+            )
+            data.append(
+                [
+                    section.name,
+                    section.token_count,
+                    section.allocated_tokens,
+                    section.priority,
+                    section.message_role.value,
+                    f"{utilization:.2%}",
+                ]
+            )
+        print(tabulate(data, headers=headers, tablefmt="grid"))
+
+    # Placement map
+    print("\n=== 桶顺序 ===")
+    for position, buckets in assembled_context.placement_map.items():
+        if buckets:
+            print(f"{position}: {buckets}")
+
+
+def demonstrate_traditional_workflow():
+    """Demonstrate the traditional full-volume processing workflow"""
+    print("=== 传统全量处理工作流程演示 ===")
+
+    # Initialize components
+    context_config = get_context_config()
+    assembler = ContextAssembler(context_config=context_config)
+    budget_manager = BudgetManager(context_config=context_config)
+
+    # Prepare initial content
+    print("\n=== 步骤1: 准备内容 ===")
+    content_sections = {
+        "system": "你是一个专业的编程助手,擅长帮助用户解决编程问题。",
+        "task": "用户想要学习如何使用Python进行数据分析和可视化。",
+        "history": "用户:我想学习Python数据分析\n助手:我可以帮你学习Python数据分析,从基础开始。",
+        "tool_response": "Python数据分析工具已加载完成",
+    }
+
+    # Calculate budget allocation
+    print("\n=== 步骤2: 计算预算分配 ===")
+    budget_allocations = budget_manager.allocate_budget()
+
+    headers = ["桶名称", "分配Token", "优先级"]
+    allocation_data = []
+    for allocation in budget_allocations:
+        allocation_data.append(
+            [
+                allocation.bucket_name,
+                allocation.allocated_tokens,
+                allocation.priority,
+            ]
+        )
+    print(tabulate(allocation_data, headers=headers, tablefmt="grid"))
+
+    # First assembly
+    print("\n=== 步骤3: 第一次全量组装 ===")
+    assembled_context = assembler.assemble_context(
+        content_sections=content_sections,
+        budget_allocations=budget_allocations,
+    )
+    print_assembled_context_stats(assembled_context, "第一次组装结果")
+
+    # Update content (all content needs to be reprocessed)
+    print("\n=== 步骤4: 更新内容(传统方式需要重新处理所有内容) ===")
+    updated_content_sections = content_sections.copy()
+    updated_content_sections["task"] = (
+        "用户想要学习如何使用Python进行数据分析和可视化,特别是使用pandas和matplotlib库。"
+    )
+    updated_content_sections["history"] += (
+        "\n用户:我想了解pandas和matplotlib\n助手:pandas用于数据处理,matplotlib用于绘图。"
+    )
+
+    # Second assembly (full processing)
+    print("\n=== 步骤5: 第二次全量组装 ===")
+    assembled_context = assembler.assemble_context(
+        content_sections=updated_content_sections,
+        budget_allocations=budget_allocations,
+    )
+    print_assembled_context_stats(assembled_context, "第二次组装结果")
+
+    # Update content again
+    print("\n=== 步骤6: 再次更新内容 ===")
+    final_content_sections = updated_content_sections.copy()
+    final_content_sections["history"] += (
+        "\n用户:请给我一个示例\n助手:好的,我来给你一个数据分析的示例。"
+    )
+    final_content_sections["tool_response"] = (
+        "数据分析工具已准备就绪,包含pandas、numpy、matplotlib等库"
+    )
+
+    # Third assembly (full processing)
+    print("\n=== 步骤7: 第三次全量组装 ===")
+    assembled_context = assembler.assemble_context(
+        content_sections=final_content_sections,
+        budget_allocations=budget_allocations,
+    )
+    print_assembled_context_stats(assembled_context, "第三次组装结果")
+
+    # Convert to message format
+    print("\n=== 步骤8: 转换为消息格式 ===")
+    messages = assembler.to_messages()
+    for i, message in enumerate(messages):
+        print(
+            f"消息 {i + 1}: {message['role']} - {message['content'][:50]}{'...' if len(message['content']) > 50 else ''}"
+        )
+
+    return assembled_context
+
+
+def demonstrate_performance_comparison():
+    """Performance comparison demo"""
+    print("\n=== 性能对比演示 ===")
+
+    # Simulate multi-turn dialogue scenarios
+    base_content = {
+        "system": "你是一个有用的AI助手。",
+        "task": "用户的问题描述。",
+        "history": "之前的对话历史。",
+    }
+
+    budget_allocations = [
+        BudgetAllocation("system", 50, 2.0),
+        BudgetAllocation("task", 100, 1.5),
+        BudgetAllocation("history", 200, 1.0),
+    ]
+
+    assembler = ContextAssembler()
+
+    # Simulate multi-turn dialogue
+    conversation_rounds = 5
+    content_updates = [
+        "用户的问题描述更新。",
+        "用户的问题描述更新,添加了更多细节。",
+        "用户的问题描述更新,添加了更多细节和背景信息。",
+        "用户的问题描述更新,添加了更多细节、背景信息和具体需求。",
+        "用户的问题描述更新,添加了更多细节、背景信息、具体需求和期望结果。",
+    ]
+
+    print("\n=== 传统全量处理模拟 ===")
+    for i in range(conversation_rounds):
+        print(f"\n--- 第{i + 1}轮对话 ---")
+
+        # Update content
+        updated_content = base_content.copy()
+        updated_content["task"] = content_updates[i]
+        updated_content["history"] = f"之前的对话历史(第{i + 1}轮)。" + "\n".join(
+            [f"对话历史{j}" for j in range(i + 1)]
+        )
+
+        # Full processing is required each time
+        assembled_context = assembler.assemble_context(
+            content_sections=updated_content,
+            budget_allocations=budget_allocations,
+        )
+
+        print(f"总token数: {assembled_context.total_tokens}")
+        print(f"处理的部分数量: {len(assembled_context.sections)}")
+        print("特点:每次都需要处理所有内容,无法利用之前的处理结果")
+
+    print("\n=== 传统方式的特点 ===")
+    print("1. 每次都需要传入完整的内容进行全量处理")
+    print("2. 无法利用之前的处理结果,每次都从零开始")
+    print("3. 适合内容变化较大的场景")
+    print("4. 实现相对简单,逻辑清晰")
+    print("5. 在内容变化小时会造成不必要的重复计算")
+
+
+def demonstrate_memory_usage():
+    """Demonstrate memory usage"""
+    print("\n=== 内存使用演示 ===")
+
+    # Create large amounts of content
+    large_content_sections = {}
+    for i in range(10):
+        large_content_sections[f"section_{i}"] = (
+            f"这是第{i + 1}个部分的内容,包含大量文本数据。" + "这是一个测试句。" * 50
+        )
+
+    budget_allocations = [BudgetAllocation(f"section_{i}", 200, 1.0) for i in range(10)]
+
+    assembler = ContextAssembler()
+
+    print("处理大量内容...")
+    assembled_context = assembler.assemble_context(
+        content_sections=large_content_sections,
+        budget_allocations=budget_allocations,
+    )
+
+    print(f"处理的部分数量: {len(assembled_context.sections)}")
+    print(f"总token数: {assembled_context.total_tokens}")
+    print(
+        f"平均每个部分的token数: {assembled_context.total_tokens / len(assembled_context.sections):.1f}"
+    )
+
+    print("\n内存使用特点:")
+    print("- 每次组装都需要在内存中保存所有内容的完整副本")
+    print("- 对于大型内容,内存使用量会线性增长")
+    print("- 无法通过增量更新来优化内存使用")
+
+
+def main():
+    """Main function"""
+    print("ContextAssembler 传统全量处理示例")
+    print("=" * 50)
+
+    # Demonstrate the basic workflow
+    demonstrate_traditional_workflow()
+
+    # Performance comparison demo
+    demonstrate_performance_comparison()
+
+    # Demonstrate memory usage
+    demonstrate_memory_usage()
+
+    print("\n=== 总结 ===")
+    print("传统全量处理方式的特点:")
+    print("✓ 实现简单,逻辑清晰")
+    print("✓ 适合内容变化较大的场景")
+    print("✓ 每次处理都是独立的,不会累积错误")
+    print("✗ 每次都需要全量处理,无法利用之前的计算结果")
+    print("✗ 在内容变化小时造成不必要的重复计算")
+    print("✗ 内存使用量相对较大")
+    print("\n适用场景:")
+    print("- 内容变化频繁且变化较大的场景")
+    print("- 对实时性要求不高的场景")
+    print("- 实现简单性比性能更重要的场景")
+
+
+if __name__ == "__main__":
+    main()