jarvis-ai-assistant 0.7.0__py3-none-any.whl → 0.7.8__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- jarvis/__init__.py +1 -1
- jarvis/jarvis_agent/__init__.py +243 -139
- jarvis/jarvis_agent/agent_manager.py +5 -10
- jarvis/jarvis_agent/builtin_input_handler.py +2 -6
- jarvis/jarvis_agent/config_editor.py +2 -7
- jarvis/jarvis_agent/event_bus.py +82 -12
- jarvis/jarvis_agent/file_context_handler.py +265 -15
- jarvis/jarvis_agent/file_methodology_manager.py +3 -4
- jarvis/jarvis_agent/jarvis.py +113 -98
- jarvis/jarvis_agent/language_extractors/__init__.py +57 -0
- jarvis/jarvis_agent/language_extractors/c_extractor.py +21 -0
- jarvis/jarvis_agent/language_extractors/cpp_extractor.py +21 -0
- jarvis/jarvis_agent/language_extractors/go_extractor.py +21 -0
- jarvis/jarvis_agent/language_extractors/java_extractor.py +84 -0
- jarvis/jarvis_agent/language_extractors/javascript_extractor.py +79 -0
- jarvis/jarvis_agent/language_extractors/python_extractor.py +21 -0
- jarvis/jarvis_agent/language_extractors/rust_extractor.py +21 -0
- jarvis/jarvis_agent/language_extractors/typescript_extractor.py +84 -0
- jarvis/jarvis_agent/language_support_info.py +486 -0
- jarvis/jarvis_agent/main.py +6 -12
- jarvis/jarvis_agent/memory_manager.py +7 -16
- jarvis/jarvis_agent/methodology_share_manager.py +10 -16
- jarvis/jarvis_agent/prompt_manager.py +1 -1
- jarvis/jarvis_agent/prompts.py +193 -171
- jarvis/jarvis_agent/protocols.py +8 -12
- jarvis/jarvis_agent/run_loop.py +77 -14
- jarvis/jarvis_agent/session_manager.py +2 -3
- jarvis/jarvis_agent/share_manager.py +12 -21
- jarvis/jarvis_agent/shell_input_handler.py +1 -2
- jarvis/jarvis_agent/task_analyzer.py +26 -4
- jarvis/jarvis_agent/task_manager.py +11 -27
- jarvis/jarvis_agent/tool_executor.py +2 -3
- jarvis/jarvis_agent/tool_share_manager.py +12 -24
- jarvis/jarvis_agent/web_server.py +55 -20
- jarvis/jarvis_c2rust/__init__.py +5 -5
- jarvis/jarvis_c2rust/cli.py +461 -499
- jarvis/jarvis_c2rust/collector.py +45 -53
- jarvis/jarvis_c2rust/constants.py +26 -0
- jarvis/jarvis_c2rust/library_replacer.py +264 -132
- jarvis/jarvis_c2rust/llm_module_agent.py +162 -190
- jarvis/jarvis_c2rust/loaders.py +207 -0
- jarvis/jarvis_c2rust/models.py +28 -0
- jarvis/jarvis_c2rust/optimizer.py +1592 -395
- jarvis/jarvis_c2rust/transpiler.py +1722 -1064
- jarvis/jarvis_c2rust/utils.py +385 -0
- jarvis/jarvis_code_agent/build_validation_config.py +2 -3
- jarvis/jarvis_code_agent/code_agent.py +394 -320
- jarvis/jarvis_code_agent/code_analyzer/__init__.py +3 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/base.py +4 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/cmake.py +17 -2
- jarvis/jarvis_code_agent/code_analyzer/build_validator/fallback.py +3 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/go.py +36 -4
- jarvis/jarvis_code_agent/code_analyzer/build_validator/java_gradle.py +9 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/java_maven.py +9 -0
- jarvis/jarvis_code_agent/code_analyzer/build_validator/makefile.py +12 -1
- jarvis/jarvis_code_agent/code_analyzer/build_validator/nodejs.py +22 -5
- jarvis/jarvis_code_agent/code_analyzer/build_validator/python.py +57 -32
- jarvis/jarvis_code_agent/code_analyzer/build_validator/rust.py +62 -6
- jarvis/jarvis_code_agent/code_analyzer/build_validator/validator.py +8 -9
- jarvis/jarvis_code_agent/code_analyzer/context_manager.py +290 -5
- jarvis/jarvis_code_agent/code_analyzer/language_support.py +21 -0
- jarvis/jarvis_code_agent/code_analyzer/languages/__init__.py +21 -3
- jarvis/jarvis_code_agent/code_analyzer/languages/c_cpp_language.py +72 -4
- jarvis/jarvis_code_agent/code_analyzer/languages/go_language.py +35 -3
- jarvis/jarvis_code_agent/code_analyzer/languages/java_language.py +212 -0
- jarvis/jarvis_code_agent/code_analyzer/languages/javascript_language.py +254 -0
- jarvis/jarvis_code_agent/code_analyzer/languages/python_language.py +52 -2
- jarvis/jarvis_code_agent/code_analyzer/languages/rust_language.py +73 -1
- jarvis/jarvis_code_agent/code_analyzer/languages/typescript_language.py +280 -0
- jarvis/jarvis_code_agent/code_analyzer/llm_context_recommender.py +306 -152
- jarvis/jarvis_code_agent/code_analyzer/structured_code.py +556 -0
- jarvis/jarvis_code_agent/code_analyzer/symbol_extractor.py +193 -18
- jarvis/jarvis_code_agent/code_analyzer/tree_sitter_extractor.py +18 -8
- jarvis/jarvis_code_agent/lint.py +258 -27
- jarvis/jarvis_code_agent/utils.py +0 -1
- jarvis/jarvis_code_analysis/code_review.py +19 -24
- jarvis/jarvis_data/config_schema.json +53 -26
- jarvis/jarvis_git_squash/main.py +4 -5
- jarvis/jarvis_git_utils/git_commiter.py +44 -49
- jarvis/jarvis_mcp/sse_mcp_client.py +20 -27
- jarvis/jarvis_mcp/stdio_mcp_client.py +11 -12
- jarvis/jarvis_mcp/streamable_mcp_client.py +15 -14
- jarvis/jarvis_memory_organizer/memory_organizer.py +55 -74
- jarvis/jarvis_methodology/main.py +32 -48
- jarvis/jarvis_multi_agent/__init__.py +79 -61
- jarvis/jarvis_multi_agent/main.py +3 -7
- jarvis/jarvis_platform/base.py +469 -199
- jarvis/jarvis_platform/human.py +7 -8
- jarvis/jarvis_platform/kimi.py +30 -36
- jarvis/jarvis_platform/openai.py +65 -27
- jarvis/jarvis_platform/registry.py +26 -10
- jarvis/jarvis_platform/tongyi.py +24 -25
- jarvis/jarvis_platform/yuanbao.py +31 -42
- jarvis/jarvis_platform_manager/main.py +66 -77
- jarvis/jarvis_platform_manager/service.py +8 -13
- jarvis/jarvis_rag/cli.py +49 -51
- jarvis/jarvis_rag/embedding_manager.py +13 -18
- jarvis/jarvis_rag/llm_interface.py +8 -9
- jarvis/jarvis_rag/query_rewriter.py +10 -21
- jarvis/jarvis_rag/rag_pipeline.py +24 -27
- jarvis/jarvis_rag/reranker.py +4 -5
- jarvis/jarvis_rag/retriever.py +28 -30
- jarvis/jarvis_sec/__init__.py +220 -3520
- jarvis/jarvis_sec/agents.py +143 -0
- jarvis/jarvis_sec/analysis.py +276 -0
- jarvis/jarvis_sec/cli.py +29 -6
- jarvis/jarvis_sec/clustering.py +1439 -0
- jarvis/jarvis_sec/file_manager.py +427 -0
- jarvis/jarvis_sec/parsers.py +73 -0
- jarvis/jarvis_sec/prompts.py +268 -0
- jarvis/jarvis_sec/report.py +83 -4
- jarvis/jarvis_sec/review.py +453 -0
- jarvis/jarvis_sec/utils.py +499 -0
- jarvis/jarvis_sec/verification.py +848 -0
- jarvis/jarvis_sec/workflow.py +7 -0
- jarvis/jarvis_smart_shell/main.py +38 -87
- jarvis/jarvis_stats/cli.py +1 -1
- jarvis/jarvis_stats/stats.py +7 -7
- jarvis/jarvis_stats/storage.py +15 -21
- jarvis/jarvis_tools/clear_memory.py +3 -20
- jarvis/jarvis_tools/cli/main.py +20 -23
- jarvis/jarvis_tools/edit_file.py +1066 -0
- jarvis/jarvis_tools/execute_script.py +42 -21
- jarvis/jarvis_tools/file_analyzer.py +6 -9
- jarvis/jarvis_tools/generate_new_tool.py +11 -20
- jarvis/jarvis_tools/lsp_client.py +1552 -0
- jarvis/jarvis_tools/methodology.py +2 -3
- jarvis/jarvis_tools/read_code.py +1525 -87
- jarvis/jarvis_tools/read_symbols.py +2 -3
- jarvis/jarvis_tools/read_webpage.py +7 -10
- jarvis/jarvis_tools/registry.py +370 -181
- jarvis/jarvis_tools/retrieve_memory.py +20 -19
- jarvis/jarvis_tools/rewrite_file.py +105 -0
- jarvis/jarvis_tools/save_memory.py +3 -15
- jarvis/jarvis_tools/search_web.py +3 -7
- jarvis/jarvis_tools/sub_agent.py +17 -6
- jarvis/jarvis_tools/sub_code_agent.py +14 -16
- jarvis/jarvis_tools/virtual_tty.py +54 -32
- jarvis/jarvis_utils/clipboard.py +7 -10
- jarvis/jarvis_utils/config.py +98 -63
- jarvis/jarvis_utils/embedding.py +5 -5
- jarvis/jarvis_utils/fzf.py +8 -8
- jarvis/jarvis_utils/git_utils.py +81 -67
- jarvis/jarvis_utils/input.py +24 -49
- jarvis/jarvis_utils/jsonnet_compat.py +465 -0
- jarvis/jarvis_utils/methodology.py +33 -35
- jarvis/jarvis_utils/utils.py +245 -202
- {jarvis_ai_assistant-0.7.0.dist-info → jarvis_ai_assistant-0.7.8.dist-info}/METADATA +205 -70
- jarvis_ai_assistant-0.7.8.dist-info/RECORD +218 -0
- jarvis/jarvis_agent/edit_file_handler.py +0 -584
- jarvis/jarvis_agent/rewrite_file_handler.py +0 -141
- jarvis/jarvis_agent/task_planner.py +0 -496
- jarvis/jarvis_platform/ai8.py +0 -332
- jarvis/jarvis_tools/ask_user.py +0 -54
- jarvis_ai_assistant-0.7.0.dist-info/RECORD +0 -192
- {jarvis_ai_assistant-0.7.0.dist-info → jarvis_ai_assistant-0.7.8.dist-info}/WHEEL +0 -0
- {jarvis_ai_assistant-0.7.0.dist-info → jarvis_ai_assistant-0.7.8.dist-info}/entry_points.txt +0 -0
- {jarvis_ai_assistant-0.7.0.dist-info → jarvis_ai_assistant-0.7.8.dist-info}/licenses/LICENSE +0 -0
- {jarvis_ai_assistant-0.7.0.dist-info → jarvis_ai_assistant-0.7.8.dist-info}/top_level.txt +0 -0
jarvis/jarvis_platform/base.py
CHANGED
@@ -15,34 +15,36 @@ from rich.status import Status  # type: ignore
 from rich.text import Text  # type: ignore

 from jarvis.jarvis_utils.config import (
-    get_max_input_token_count,
     get_pretty_output,
     is_print_prompt,
     is_immediate_abort,
     is_save_session_history,
     get_data_dir,
+    get_max_input_token_count,
+    get_conversation_turn_threshold,
 )
-from jarvis.jarvis_utils.embedding import split_text_into_chunks
 from jarvis.jarvis_utils.globals import set_in_chat, get_interrupt, console
 import jarvis.jarvis_utils.globals as G
-from jarvis.jarvis_utils.output import OutputType, PrettyOutput
+from jarvis.jarvis_utils.output import OutputType, PrettyOutput  # kept for syntax highlighting
 from jarvis.jarvis_utils.tag import ct, ot
-from jarvis.jarvis_utils.utils import
+from jarvis.jarvis_utils.utils import while_success, while_true
+from jarvis.jarvis_utils.embedding import get_context_token_count


 class BasePlatform(ABC):
-    """
+    """Base class for large language model platforms"""

     def __init__(self):
-        """
+        """Initialize the model"""
         self.suppress_output = True  # output control flag
         self.web = False  # web attribute, defaults to False
         self._saved = False
         self.model_group: Optional[str] = None
         self._session_history_file: Optional[str] = None
+        self._conversation_turn = 0  # conversation turn counter

     def __enter__(self) -> Self:
-        """
+        """Enter the context manager"""
         return self

     def __exit__(
@@ -51,23 +53,24 @@ class BasePlatform(ABC):
         exc_val: Optional[BaseException],
         exc_tb: Optional[TracebackType],
     ) -> None:
-        """
+        """Exit the context manager"""
         if not self._saved:
             self.delete_chat()

     @abstractmethod
     def set_model_name(self, model_name: str):
-        """
+        """Set the model name"""
         raise NotImplementedError("set_model_name is not implemented")

     def reset(self):
-        """
+        """Reset the model"""
         self.delete_chat()
         self._session_history_file = None
+        self._conversation_turn = 0  # reset the conversation turn counter

     @abstractmethod
     def chat(self, message: str) -> Generator[str, None, None]:
-        """
+        """Run a chat exchange"""
         raise NotImplementedError("chat is not implemented")

     @abstractmethod
@@ -76,189 +79,357 @@ class BasePlatform(ABC):

     @abstractmethod
     def support_upload_files(self) -> bool:
-        """
+        """Check whether the platform supports file upload"""
         return False

-    def
+    def _format_progress_bar(self, percent: float, width: int = 20) -> str:
+        """Format a progress bar string
+
+        Args:
+            percent: Percentage (0-100)
+            width: Progress bar width in characters
+
+        Returns:
+            str: The formatted progress bar string
+        """
+        # Clamp the percentage range
+        percent = max(0, min(100, percent))
+
+        # Compute the number of filled characters
+        filled = int(width * percent / 100)
+        empty = width - filled
+
+        # Pick a color based on the percentage
+        if percent >= 90:
+            color = "red"
+        elif percent >= 80:
+            color = "yellow"
+        else:
+            color = "green"
+
+        # Build the bar: █ marks filled cells, ░ marks empty ones
+        bar = "█" * filled + "░" * empty
+
+        return f"[{color}]{bar}[/{color}]"
+
+    def _get_token_usage_info(self, current_response: str = "") -> Tuple[float, str, str]:
+        """Get token usage information
+
+        Args:
+            current_response: Current response content (used to count tokens while streaming)
+
+        Returns:
+            Tuple[float, str, str]: (usage_percent, percent_color, progress_bar)
+        """
+        try:
+            history_tokens = self.get_used_token_count()
+            current_response_tokens = get_context_token_count(current_response)
+            total_tokens = history_tokens + current_response_tokens
+            max_tokens = get_max_input_token_count(self.model_group)
+
+            if max_tokens > 0:
+                usage_percent = (total_tokens / max_tokens) * 100
+                if usage_percent >= 90:
+                    percent_color = "red"
+                elif usage_percent >= 80:
+                    percent_color = "yellow"
+                else:
+                    percent_color = "green"
+                progress_bar = self._format_progress_bar(usage_percent, width=15)
+                return usage_percent, percent_color, progress_bar
+            return 0.0, "green", ""
+        except Exception:
+            return 0.0, "green", ""
+
+    def _update_panel_subtitle_with_token(
+        self, panel: Panel, response: str, is_completed: bool = False, duration: float = 0.0
+    ) -> None:
+        """Update the panel subtitle with token usage information
+
+        Args:
+            panel: Panel to update
+            response: Current response content
+            is_completed: Whether the response has finished
+            duration: Elapsed time in seconds
+        """
+        from datetime import datetime
+
+        current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+        try:
+            usage_percent, percent_color, progress_bar = self._get_token_usage_info(response)
+            max_tokens = get_max_input_token_count(self.model_group)
+            total_tokens = self.get_used_token_count() + get_context_token_count(response)
+
+            threshold = get_conversation_turn_threshold()
+            if is_completed:
+                if max_tokens > 0 and progress_bar:
+                    panel.subtitle = (
+                        f"[bold green]✓ {current_time} | ({self._conversation_turn}/{threshold}) | Completed in {duration:.2f}s | "
+                        f"Token: {progress_bar} "
+                        f"[{percent_color}]{usage_percent:.1f}% ({total_tokens}/{max_tokens})[/{percent_color}][/bold green]"
+                    )
+                else:
+                    panel.subtitle = f"[bold green]✓ {current_time} | ({self._conversation_turn}/{threshold}) | Completed in {duration:.2f}s[/bold green]"
+            else:
+                if max_tokens > 0 and progress_bar:
+                    panel.subtitle = (
+                        f"[yellow]{current_time} | ({self._conversation_turn}/{threshold}) | Responding... (press Ctrl+C to interrupt) | "
+                        f"Token: {progress_bar} "
+                        f"[{percent_color}]{usage_percent:.1f}% ({total_tokens}/{max_tokens})[/{percent_color}][/yellow]"
+                    )
+                else:
+                    panel.subtitle = f"[yellow]{current_time} | ({self._conversation_turn}/{threshold}) | Responding... (press Ctrl+C to interrupt)[/yellow]"
+        except Exception:
+            threshold = get_conversation_turn_threshold()
+            if is_completed:
+                panel.subtitle = f"[bold green]✓ {current_time} | ({self._conversation_turn}/{threshold}) | Completed in {duration:.2f}s[/bold green]"
+            else:
+                panel.subtitle = f"[yellow]{current_time} | ({self._conversation_turn}/{threshold}) | Responding... (press Ctrl+C to interrupt)[/yellow]"
+
+    def _chat_with_pretty_output(self, message: str, start_time: float) -> str:
+        """Chat using pretty output mode
+
+        Args:
+            message: User message
+            start_time: Start time
+
+        Returns:
+            str: Model response
+        """
         import time
+
+        chat_iterator = self.chat(message)
+        first_chunk = None
+
+        with Status(
+            f"🤔 {(G.current_agent_name + ' · ') if G.current_agent_name else ''}{self.name()} is thinking...",
+            spinner="dots",
+            console=console,
+        ):
+            try:
+                while True:
+                    first_chunk = next(chat_iterator)
+                    if first_chunk:
+                        break
+            except StopIteration:
+                self._append_session_history(message, "")
+                return ""
+
+        text_content = Text(overflow="fold")
+        panel = Panel(
+            text_content,
+            title=f"[bold cyan]{(G.current_agent_name + ' · ') if G.current_agent_name else ''}{self.name()}[/bold cyan]",
+            subtitle="[yellow]Responding... (press Ctrl+C to interrupt)[/yellow]",
+            border_style="bright_blue",
+            box=box.ROUNDED,
+            expand=True,
+        )

-
+        response = ""
+        last_subtitle_update_time = time.time()
+        subtitle_update_interval = 3  # subtitle update interval in seconds; a lower frequency avoids re-rendering the title
+        update_count = 0  # update counter used to throttle subtitle updates
+        with Live(panel, refresh_per_second=4, transient=False) as live:
+            def _update_panel_content(content: str, update_subtitle: bool = False):
+                nonlocal response, last_subtitle_update_time, update_count
+                text_content.append(content, style="bright_white")
+                update_count += 1
+
+                # Scrolling logic - only applied once the content exceeds a certain number of lines
+                max_text_height = console.height - 5
+                if max_text_height <= 0:
+                    max_text_height = 1
+
+                lines = text_content.wrap(
+                    console,
+                    console.width - 4 if console.width > 4 else 1,
+                )

-
-
-
-
+                # Only trim when the content exceeds the maximum height, avoiding unnecessary work
+                if len(lines) > max_text_height:
+                    text_content.plain = "\n".join(
+                        [line.plain for line in lines[-max_text_height:]]
+                    )
+
+                # Only update the subtitle when needed (lower frequency avoids re-rendering the title)
+                # Strategy: update the subtitle every 10 content updates or every 3 seconds
+                current_time = time.time()
+                should_update_subtitle = (
+                    update_subtitle
+                    or update_count % 10 == 0  # every 10 updates
+                    or (current_time - last_subtitle_update_time) >= subtitle_update_interval
+                )
+
+                if should_update_subtitle:
+                    self._update_panel_subtitle_with_token(panel, response, is_completed=False)
+                    last_subtitle_update_time = current_time
+
+                # Update the panel (content only; subtitle updates are throttled)
+                live.update(panel)
+
+            # Process first chunk
+            response += first_chunk
+            if first_chunk:
+                _update_panel_content(first_chunk, update_subtitle=True)  # update the subtitle on the first update
+
+            # Buffering: lower the update frequency to reduce UI flicker
+            buffer = ""
+            last_update_time = time.time()
+            update_interval = 1
+            min_buffer_size = 20
+
+            def _flush_buffer():
+                nonlocal buffer, last_update_time
+                if buffer:
+                    _update_panel_content(buffer)
+                    buffer = ""
+                    last_update_time = time.time()
+
+            # Process rest of the chunks
+            for s in chat_iterator:
+                if not s:
+                    continue
+                response += s
+                buffer += s
+
+                current_time = time.time()
+                should_update = (
+                    len(buffer) >= min_buffer_size
+                    or (current_time - last_update_time) >= update_interval
+                )

-
+                if should_update:
+                    _flush_buffer()
+
+                if is_immediate_abort() and get_interrupt():
+                    _flush_buffer()
+                    self._append_session_history(message, response)
+                    return response
+
+            _flush_buffer()
+            # Before finishing, replace the panel content with the full response so the final rendered panel shows everything
+            if response:
+                text_content.plain = response
+            # Final subtitle and panel update
+            end_time = time.time()
+            duration = end_time - start_time
+            self._update_panel_subtitle_with_token(panel, response, is_completed=True, duration=duration)
+            # Final panel update; the Live context prints it automatically on exit (transient=False)
+            live.update(panel)
+            # Note: do not call console.print() here, since Live prints the panel automatically on exit
+        # After Live exits, only add a blank-line separator; do not print the panel again, to avoid duplicated content
+        console.print()
+        return response

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                            5,
-                        ):
-                            response += trunk
-
-                        PrettyOutput.print("Submission complete", OutputType.SUCCESS)
-                        response += "\n" + while_true(
-                            lambda: while_success(
-                                lambda: self._chat("All of the content has been provided; please continue based on it"), 5
-                            ),
-                            5,
-                        )
-                    else:
-                        response = ""
-
-        if not self.suppress_output:
-            if get_pretty_output():
-                chat_iterator = self.chat(message)
-                first_chunk = None
-
-                with Status(
-                    f"🤔 {(G.current_agent_name + ' · ') if G.current_agent_name else ''}{self.name()} is thinking...",
-                    spinner="dots",
-                    console=console,
-                ):
-                    try:
-                        while True:
-                            first_chunk = next(chat_iterator)
-                            if first_chunk:
-                                break
-                    except StopIteration:
-                        self._append_session_history(message, "")
-                        return ""
-
-                    text_content = Text(overflow="fold")
-                    panel = Panel(
-                        text_content,
-                        title=f"[bold cyan]{(G.current_agent_name + ' · ') if G.current_agent_name else ''}{self.name()}[/bold cyan]",
-                        subtitle="[yellow]Responding... (press Ctrl+C to interrupt)[/yellow]",
-                        border_style="bright_blue",
-                        box=box.ROUNDED,
-                        expand=True,  # allow the panel to resize automatically
-                    )
+    def _chat_with_simple_output(self, message: str, start_time: float) -> str:
+        """Chat using simple output mode
+
+        Args:
+            message: User message
+            start_time: Start time
+
+        Returns:
+            str: Model response
+        """
+        import time
+
+        console.print(
+            f"🤖 Model output - {(G.current_agent_name + ' · ') if G.current_agent_name else ''}{self.name()} (press Ctrl+C to interrupt)",
+            soft_wrap=False,
+        )
+        response = ""
+        for s in self.chat(message):
+            console.print(s, end="")
+            response += s
+            if is_immediate_abort() and get_interrupt():
+                self._append_session_history(message, response)
+                return response
+        console.print()
+        end_time = time.time()
+        duration = end_time - start_time
+        console.print(f"✓ Completed in {duration:.2f}s")
+        return response

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                        panel.subtitle = (
-                            "[yellow]Responding... (press Ctrl+C to interrupt)[/yellow]"
-                        )
-                        live.update(panel)
-
-                    # Process first chunk
-                    response += first_chunk
-                    if first_chunk:
-                        _update_panel_content(first_chunk)
-
-                    # Process rest of the chunks
-                    for s in chat_iterator:
-                        if not s:
-                            continue
-                        response += s  # Accumulate the full response string
-                        _update_panel_content(s)
-
-                        if is_immediate_abort() and get_interrupt():
-                            self._append_session_history(message, response)
-                            return response  # Return the partial response immediately
-
-                    # At the end, display the entire response
-                    text_content.plain = response
-
-                    end_time = time.time()
-                    duration = end_time - start_time
-                    panel.subtitle = f"[bold green]✓ Completed in {duration:.2f}s[/bold green]"
-                    live.update(panel)
-                    console.print()
-            else:
-                # Print a clear prefix line before streaming model output (non-pretty mode)
-                console.print(
-                    f"🤖 Model output - {(G.current_agent_name + ' · ') if G.current_agent_name else ''}{self.name()} (press Ctrl+C to interrupt)",
-                    soft_wrap=False,
-                )
-                for s in self.chat(message):
-                    console.print(s, end="")
-                    response += s
-                    if is_immediate_abort() and get_interrupt():
-                        self._append_session_history(message, response)
-                        return response
-                console.print()
-                end_time = time.time()
-                duration = end_time - start_time
-                console.print(f"✓ Completed in {duration:.2f}s")
-        else:
-            for s in self.chat(message):
-                response += s
-                if is_immediate_abort() and get_interrupt():
-                    self._append_session_history(message, response)
-                    return response
-        # Keep original think tag handling
+    def _chat_with_suppressed_output(self, message: str) -> str:
+        """Chat in silent mode
+
+        Args:
+            message: User message
+
+        Returns:
+            str: Model response
+        """
+        response = ""
+        for s in self.chat(message):
+            response += s
+            if is_immediate_abort() and get_interrupt():
+                self._append_session_history(message, response)
+                return response
+        return response
+
+    def _process_response(self, response: str) -> str:
+        """Process the response, removing think tags
+
+        Args:
+            response: Raw response
+
+        Returns:
+            str: Processed response
+        """
         response = re.sub(
             ot("think") + r".*?" + ct("think"), "", response, flags=re.DOTALL
         )
         response = re.sub(
             ot("thinking") + r".*?" + ct("thinking"), "", response, flags=re.DOTALL
         )
-
+        return response
+
+    def _chat(self, message: str):
+        import time
+
+        start_time = time.time()
+
+        # If the input is a blank string, print a warning and return an empty string
+        if message.strip() == "":
+            print("⚠️ Input is a blank string; this request has been ignored")
+            return ""
+
+        # Check and truncate the message to avoid exceeding the remaining token limit
+        message = self._truncate_message_if_needed(message)
+
+        # Pick the handling path based on the output mode
+        if not self.suppress_output:
+            if get_pretty_output():
+                response = self._chat_with_pretty_output(message, start_time)
+            else:
+                response = self._chat_with_simple_output(message, start_time)
+        else:
+            response = self._chat_with_suppressed_output(message)
+
+        # Process the response and save the session history
+        response = self._process_response(response)
         self._append_session_history(message, response)
+        # Increment the conversation turn counter
+        self._conversation_turn += 1
         return response

     def chat_until_success(self, message: str) -> str:
-        """
+        """Chat with the model until a successful response is returned."""
         try:
             set_in_chat(True)
             if not self.suppress_output and is_print_prompt():
-                PrettyOutput.print(f"{message}", OutputType.USER)
-
-
+                PrettyOutput.print(f"{message}", OutputType.USER)  # kept for syntax highlighting
+
+            result: str = ""
+            result = while_true(
+                lambda: while_success(lambda: self._chat(message))
             )
+
+            # Check if result is empty or False (retry exhausted)
+            # Convert False to empty string for type safety
+            if result is False or result == "":
+                raise ValueError("The returned result is empty")
+
             from jarvis.jarvis_utils.globals import set_last_message

             set_last_message(result)
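The arithmetic behind the new `_format_progress_bar` and `_get_token_usage_info` helpers is small: clamp the percentage, fill `width * percent / 100` cells, and switch the color at the 80% and 90% thresholds. A minimal standalone sketch of the same fill and color logic (the `render_token_bar` name is illustrative and not part of the package; it drops the rich dependency):

```python
def render_token_bar(used_tokens: int, max_tokens: int, width: int = 15) -> str:
    """Sketch of the fill/color math used by _format_progress_bar and _get_token_usage_info."""
    if max_tokens <= 0:
        return ""  # base.py returns an empty bar when no input limit is configured
    percent = max(0.0, min(100.0, used_tokens / max_tokens * 100))
    filled = int(width * percent / 100)
    color = "red" if percent >= 90 else "yellow" if percent >= 80 else "green"
    bar = "█" * filled + "░" * (width - filled)
    return f"[{color}]{bar}[/{color}] {percent:.1f}% ({used_tokens}/{max_tokens})"

# Example: 85,000 of 100,000 tokens used -> 12 of 15 cells filled, yellow band
print(render_token_bar(85_000, 100_000))
```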
@@ -268,88 +439,88 @@ class BasePlatform(ABC):

     @abstractmethod
     def name(self) -> str:
-        """
+        """Model name"""
         raise NotImplementedError("name is not implemented")

     @classmethod
     @abstractmethod
     def platform_name(cls) -> str:
-        """
+        """Platform name"""
         raise NotImplementedError("platform_name is not implemented")

     @abstractmethod
     def delete_chat(self) -> bool:
-        """
+        """Delete the chat"""
         raise NotImplementedError("delete_chat is not implemented")

     @abstractmethod
     def save(self, file_path: str) -> bool:
-        """
+        """Save the chat session to a file.

-
-
-
+        Note:
+            Implementations of this method should set `self._saved` to True after a successful save,
+            to prevent the session from being deleted when the object is destroyed.

-
-            file_path:
+        Args:
+            file_path: Path of the file the session is saved to.

-
-            True
+        Returns:
+            True if the save succeeded, otherwise False.
         """
         raise NotImplementedError("save is not implemented")

     @abstractmethod
     def restore(self, file_path: str) -> bool:
-        """
+        """Restore a chat session from a file.

-
-            file_path:
+        Args:
+            file_path: Path of the session file to restore.

-
-            True
+        Returns:
+            True if the restore succeeded, otherwise False.
         """
         raise NotImplementedError("restore is not implemented")

     @abstractmethod
     def set_system_prompt(self, message: str):
-        """
+        """Set the system message"""
         raise NotImplementedError("set_system_prompt is not implemented")

     @abstractmethod
     def get_model_list(self) -> List[Tuple[str, str]]:
-        """
+        """Get the model list"""
         raise NotImplementedError("get_model_list is not implemented")

     @classmethod
     @abstractmethod
     def get_required_env_keys(cls) -> List[str]:
-        """
+        """Get the required environment variable keys"""
         raise NotImplementedError("get_required_env_keys is not implemented")

     @classmethod
     def get_env_defaults(cls) -> Dict[str, str]:
-        """
+        """Get default values for environment variables"""
         return {}

     @classmethod
     def get_env_config_guide(cls) -> Dict[str, str]:
-        """
+        """Get the environment variable configuration guide

-
-            Dict[str, str]:
+        Returns:
+            Dict[str, str]: A dict mapping environment variable names to their configuration notes
         """
         return {}

     def set_suppress_output(self, suppress: bool):
-        """
+        """Set whether output is suppressed"""
         self.suppress_output = suppress

     def set_model_group(self, model_group: Optional[str]):
-        """
+        """Set the model group"""
         self.model_group = model_group

     def set_web(self, web: bool):
-        """
+        """Set the web flag"""
         self.web = web

     def _append_session_history(self, user_input: str, model_output: str) -> None:
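The reworded `save` docstring spells out the contract: an implementation sets `self._saved` to True after a successful save, which is what keeps `__exit__` from calling `delete_chat()` on the session. A minimal sketch of a conforming subclass (the `FilePlatform` name and JSON persistence are assumptions for illustration; the remaining abstract methods are omitted, so this class is not instantiable as written):

```python
import json

from jarvis.jarvis_platform.base import BasePlatform


class FilePlatform(BasePlatform):  # hypothetical subclass, other abstract methods omitted
    def save(self, file_path: str) -> bool:
        try:
            with open(file_path, "w", encoding="utf-8") as f:
                json.dump(getattr(self, "messages", []), f, ensure_ascii=False)
            self._saved = True  # per the docstring: keeps __exit__ from deleting the session
            return True
        except OSError:
            return False

    def restore(self, file_path: str) -> bool:
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                self.messages = json.load(f)
            return True
        except (OSError, ValueError):
            return False
```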
@@ -398,7 +569,106 @@ class BasePlatform(ABC):
             # Do not break chat flow if writing history fails
             pass

+    def get_conversation_history(self) -> List[Dict[str, str]]:
+        """Get the current conversation history
+
+        Returns:
+            List[Dict[str, str]]: Conversation history; each element contains role and content
+
+        Note:
+            The default implementation checks for a messages attribute; subclasses may override this method to provide a custom implementation
+        """
+        if hasattr(self, "messages"):
+            return getattr(self, "messages", [])
+        return []
+
+    def get_used_token_count(self) -> int:
+        """Count the tokens used by the current conversation history
+
+        Returns:
+            int: Number of tokens used by the current conversation history
+        """
+        history = self.get_conversation_history()
+        if not history:
+            return 0
+
+        total_tokens = 0
+        for message in history:
+            content = message.get("content", "")
+            if content:
+                total_tokens += get_context_token_count(content)
+
+        return total_tokens
+
+    def get_remaining_token_count(self) -> int:
+        """Get the number of tokens still available
+
+        Returns:
+            int: Remaining available tokens (input window limit - tokens already used)
+        """
+        max_tokens = get_max_input_token_count(self.model_group)
+        used_tokens = self.get_used_token_count()
+        remaining = max_tokens - used_tokens
+        return max(0, remaining)  # make sure the return value is never negative
+
+    def _truncate_message_if_needed(self, message: str) -> str:
+        """Truncate the message if it exceeds the remaining token limit
+
+        Args:
+            message: Original message
+
+        Returns:
+            str: The truncated message (the original message if no truncation was needed)
+        """
+        try:
+            # Get the remaining token count
+            remaining_tokens = self.get_remaining_token_count()
+
+            # If no tokens remain, return an empty message
+            if remaining_tokens <= 0:
+                print("⚠️ Warning: 0 tokens remaining, the message cannot be sent")
+                return ""
+
+            # Count the tokens in the message
+            message_tokens = get_context_token_count(message)
+
+            # No truncation needed if the message fits within the remaining tokens
+            if message_tokens <= remaining_tokens:
+                return message
+
+            # Truncation needed: keep 80% of the remaining tokens for the message, 20% as a safety margin
+            target_tokens = int(remaining_tokens * 0.8)
+            if target_tokens <= 0:
+                print("⚠️ Warning: not enough tokens remaining, the message cannot be sent")
+                return ""
+
+            # Estimate the character count (1 token ≈ 4 characters)
+            target_chars = target_tokens * 4
+
+            # No truncation needed if the message is shorter than the target length (token estimation may be imprecise)
+            if len(message) <= target_chars:
+                return message
+
+            # Truncate the message: keep the leading content and append a truncation notice
+            truncated_message = message[:target_chars]
+            # Try to cut at the last complete sentence
+            last_period = truncated_message.rfind('.')
+            last_newline = truncated_message.rfind('\n')
+            last_break = max(last_period, last_newline)
+
+            if last_break > target_chars * 0.5:  # only if the break point is not too early
+                truncated_message = truncated_message[:last_break + 1]
+
+            truncated_message += "\n\n... (message too long; truncated to avoid exceeding the context limit)"
+            print(f"⚠️ Warning: message too long ({message_tokens} tokens), truncated to about {target_tokens} tokens")
+
+            return truncated_message
+        except Exception as e:
+            # If truncation fails, return the original message (do not block the chat)
+            print(f"⚠️ Warning: error while checking message length: {e}; using the original message")
+            return message
+
     @abstractmethod
     def support_web(self) -> bool:
-        """
+        """Check whether the platform supports web features"""
         return False