auto-coder 0.1.361__py3-none-any.whl → 0.1.363__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of auto-coder might be problematic. Click here for more details.
- {auto_coder-0.1.361.dist-info → auto_coder-0.1.363.dist-info}/METADATA +2 -1
- {auto_coder-0.1.361.dist-info → auto_coder-0.1.363.dist-info}/RECORD +57 -29
- autocoder/agent/auto_learn.py +249 -262
- autocoder/agent/base_agentic/__init__.py +0 -0
- autocoder/agent/base_agentic/agent_hub.py +169 -0
- autocoder/agent/base_agentic/agentic_lang.py +112 -0
- autocoder/agent/base_agentic/agentic_tool_display.py +180 -0
- autocoder/agent/base_agentic/base_agent.py +1582 -0
- autocoder/agent/base_agentic/default_tools.py +683 -0
- autocoder/agent/base_agentic/test_base_agent.py +82 -0
- autocoder/agent/base_agentic/tool_registry.py +425 -0
- autocoder/agent/base_agentic/tools/__init__.py +12 -0
- autocoder/agent/base_agentic/tools/ask_followup_question_tool_resolver.py +72 -0
- autocoder/agent/base_agentic/tools/attempt_completion_tool_resolver.py +37 -0
- autocoder/agent/base_agentic/tools/base_tool_resolver.py +35 -0
- autocoder/agent/base_agentic/tools/example_tool_resolver.py +46 -0
- autocoder/agent/base_agentic/tools/execute_command_tool_resolver.py +72 -0
- autocoder/agent/base_agentic/tools/list_files_tool_resolver.py +110 -0
- autocoder/agent/base_agentic/tools/plan_mode_respond_tool_resolver.py +35 -0
- autocoder/agent/base_agentic/tools/read_file_tool_resolver.py +54 -0
- autocoder/agent/base_agentic/tools/replace_in_file_tool_resolver.py +156 -0
- autocoder/agent/base_agentic/tools/search_files_tool_resolver.py +134 -0
- autocoder/agent/base_agentic/tools/talk_to_group_tool_resolver.py +96 -0
- autocoder/agent/base_agentic/tools/talk_to_tool_resolver.py +79 -0
- autocoder/agent/base_agentic/tools/use_mcp_tool_resolver.py +44 -0
- autocoder/agent/base_agentic/tools/write_to_file_tool_resolver.py +58 -0
- autocoder/agent/base_agentic/types.py +189 -0
- autocoder/agent/base_agentic/utils.py +100 -0
- autocoder/auto_coder.py +1 -1
- autocoder/auto_coder_runner.py +36 -14
- autocoder/chat/conf_command.py +11 -10
- autocoder/commands/auto_command.py +227 -159
- autocoder/common/__init__.py +2 -2
- autocoder/common/ignorefiles/ignore_file_utils.py +12 -8
- autocoder/common/result_manager.py +10 -2
- autocoder/common/rulefiles/autocoderrules_utils.py +169 -0
- autocoder/common/save_formatted_log.py +1 -1
- autocoder/common/v2/agent/agentic_edit.py +53 -41
- autocoder/common/v2/agent/agentic_edit_tools/read_file_tool_resolver.py +15 -12
- autocoder/common/v2/agent/agentic_edit_tools/replace_in_file_tool_resolver.py +73 -1
- autocoder/common/v2/agent/agentic_edit_tools/write_to_file_tool_resolver.py +132 -4
- autocoder/common/v2/agent/agentic_edit_types.py +1 -2
- autocoder/common/v2/agent/agentic_tool_display.py +2 -3
- autocoder/common/v2/code_auto_generate_editblock.py +3 -1
- autocoder/index/index.py +14 -8
- autocoder/privacy/model_filter.py +297 -35
- autocoder/rag/long_context_rag.py +424 -397
- autocoder/rag/test_doc_filter.py +393 -0
- autocoder/rag/test_long_context_rag.py +473 -0
- autocoder/rag/test_token_limiter.py +342 -0
- autocoder/shadows/shadow_manager.py +1 -3
- autocoder/utils/_markitdown.py +22 -3
- autocoder/version.py +1 -1
- {auto_coder-0.1.361.dist-info → auto_coder-0.1.363.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.361.dist-info → auto_coder-0.1.363.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.361.dist-info → auto_coder-0.1.363.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.361.dist-info → auto_coder-0.1.363.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1582 @@
|
|
|
1
|
+
from datetime import datetime
|
|
2
|
+
import json
|
|
3
|
+
import os
|
|
4
|
+
import threading
|
|
5
|
+
import time
|
|
6
|
+
import re
|
|
7
|
+
import xml.sax.saxutils
|
|
8
|
+
from abc import ABC, abstractmethod
|
|
9
|
+
from typing import List, Dict, Any, Union, Optional, Tuple, Type, Generator, Iterator
|
|
10
|
+
from rich.console import Console
|
|
11
|
+
from rich.panel import Panel
|
|
12
|
+
from loguru import logger
|
|
13
|
+
import byzerllm
|
|
14
|
+
from pydantic import BaseModel
|
|
15
|
+
|
|
16
|
+
from autocoder.common import AutoCoderArgs, git_utils, SourceCodeList, SourceCode
|
|
17
|
+
from autocoder.common.global_cancel import global_cancel
|
|
18
|
+
from autocoder.rag.variable_holder import VariableHolder
|
|
19
|
+
from autocoder.utils import llms as llms_utils
|
|
20
|
+
from autocoder.common.global_cancel import global_cancel
|
|
21
|
+
from autocoder.common import detect_env
|
|
22
|
+
from autocoder.common import shells
|
|
23
|
+
from autocoder.common.printer import Printer
|
|
24
|
+
from autocoder.utils.auto_project_type import ProjectTypeAnalyzer
|
|
25
|
+
from autocoder.common.mcp_server import get_mcp_server, McpServerInfoRequest
|
|
26
|
+
from autocoder.common.file_monitor.monitor import FileMonitor
|
|
27
|
+
from autocoder.common.rulefiles.autocoderrules_utils import get_rules
|
|
28
|
+
from autocoder.auto_coder_runner import load_tokenizer
|
|
29
|
+
from autocoder.linters.shadow_linter import ShadowLinter
|
|
30
|
+
from autocoder.compilers.shadow_compiler import ShadowCompiler
|
|
31
|
+
from autocoder.shadows.shadow_manager import ShadowManager
|
|
32
|
+
from autocoder.events.event_manager_singleton import get_event_manager
|
|
33
|
+
from autocoder.events.event_types import Event, EventType, EventMetadata
|
|
34
|
+
from autocoder.events import event_content as EventContentCreator
|
|
35
|
+
from autocoder.memory.active_context_manager import ActiveContextManager
|
|
36
|
+
from autocoder.common.action_yml_file_manager import ActionYmlFileManager
|
|
37
|
+
|
|
38
|
+
from .types import (
|
|
39
|
+
BaseTool, ToolResult, AgentRequest, FileChangeEntry,
|
|
40
|
+
LLMOutputEvent, LLMThinkingEvent, ToolCallEvent, ToolResultEvent,
|
|
41
|
+
CompletionEvent, ErrorEvent, TokenUsageEvent, AttemptCompletionTool,
|
|
42
|
+
PlanModeRespondTool,Message,ReplyDecision, PlanModeRespondEvent
|
|
43
|
+
)
|
|
44
|
+
from .tool_registry import ToolRegistry
|
|
45
|
+
from .tools.base_tool_resolver import BaseToolResolver
|
|
46
|
+
from .agent_hub import AgentHub, Group, GroupMembership
|
|
47
|
+
from .utils import GroupUtils,GroupMemberResponse
|
|
48
|
+
from .default_tools import register_default_tools
|
|
49
|
+
from .agentic_tool_display import get_tool_display_message
|
|
50
|
+
from autocoder.common.utils_code_auto_generate import stream_chat_with_continue
|
|
51
|
+
from autocoder.common.save_formatted_log import save_formatted_log
|
|
52
|
+
from . import agentic_lang
|
|
53
|
+
|
|
54
|
+
class BaseAgent(ABC):
|
|
55
|
+
"""
|
|
56
|
+
基础代理类,所有的代理实现都应继承此类
|
|
57
|
+
遵循初始化顺序规则,避免FileMonitor、token计数器等组件冲突
|
|
58
|
+
"""
|
|
59
|
+
|
|
60
|
+
def __init__(
    self,
    name:str,
    llm: Union[byzerllm.ByzerLLM, byzerllm.SimpleByzerLLM],
    files: SourceCodeList,
    args: AutoCoderArgs,
    conversation_history: Optional[List[Dict[str, Any]]] = None,
):
    """
    Initialize the agent.

    The initialization order is deliberate (FileMonitor -> rules ->
    tokenizer -> components) to avoid conflicts between the file monitor,
    the token counter, and other shared singletons.

    Args:
        name: Unique agent name; used to register with AgentHub.
        llm: Language-model client.
        files: Source-code file list.
        args: Configuration parameters.
        conversation_history: Prior conversation records, if any.
    """
    # 1. Start the FileMonitor (must happen before everything else).
    try:
        monitor = FileMonitor(args.source_dir)
        if not monitor.is_running():
            monitor.start()
            logger.info(f"文件监控已启动: {args.source_dir}")
        else:
            logger.info(f"文件监控已在运行中: {monitor.root_dir}")

        # 2. Load rule files.
        _ = get_rules(args.source_dir)
    except Exception as e:
        logger.error(f"初始化文件监控出错: {e}")

    # 3. Load the tokenizer (must come after the two steps above).
    if VariableHolder.TOKENIZER_PATH is None:
        load_tokenizer()

    # 4. Basic components.
    self.llm = llm
    self.args = args
    self.files = files
    self.printer = Printer()
    self.conversation_history = conversation_history or []

    # 5. Other components (shadow file system, linter, compiler).
    self.project_type_analyzer = ProjectTypeAnalyzer(args=args, llm=self.llm)
    self.shadow_manager = ShadowManager(args.source_dir, args.event_file, args.ignore_clean_shadows)
    self.shadow_linter = ShadowLinter(self.shadow_manager, verbose=False)
    self.shadow_compiler = ShadowCompiler(self.shadow_manager, verbose=False)

    # MCP server info (best effort; failure is logged, not fatal).
    self.mcp_server_info = ""
    try:
        self.mcp_server = get_mcp_server()
        mcp_server_info_response = self.mcp_server.send_request(
            McpServerInfoRequest(
                model=args.inference_model or args.model,
                product_mode=args.product_mode,
            )
        )
        self.mcp_server_info = mcp_server_info_response.result
    except Exception as e:
        logger.error(f"Error getting MCP server info: {str(e)}")

    # Change-tracking info.
    # Format: { file_path: FileChangeEntry(...) }
    self.file_changes: Dict[str, FileChangeEntry] = {}

    self.name = name

    # Group-chat / private-chat state.
    self.joined_groups: Dict[str, Group] = {}
    self.private_chats: Dict[str, List[Message]] = {}
    self.agentic_conversations: List[Dict[str,Any]] = []
    self.custom_system_prompt = "You are a highly skilled software engineer with extensive knowledge in many programming languages, frameworks, design patterns, and best practices."
    self.refuse_reply_reason = ""
    self._group_lock = threading.RLock()  # guards joined_groups
    self._chat_lock = threading.RLock()  # guards private_chats
    # Auto-register with AgentHub.
    AgentHub.register_agent(self)
    register_default_tools(params=self._render_context())
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def who_am_i(self, role: str) -> 'BaseAgent':
    """Set this agent's self-description (used as its system prompt); returns self for chaining."""
    self.custom_system_prompt = role
    return self
|
|
147
|
+
|
|
148
|
+
def when_to_refuse_reply(self, reason: str) -> 'BaseAgent':
    """Set the condition text under which this agent should refuse to reply; returns self for chaining."""
    self.refuse_reply_reason = reason
    return self
|
|
151
|
+
|
|
152
|
+
def introduce_myself(self) -> str:
    """Return this agent's self-description (the custom system prompt)."""
    return self.custom_system_prompt
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def join_group(self, group: Group) -> 'GroupMembership':
    """
    Join *group* (idempotent) and return a GroupMembership handle.

    Fix: ``joined_groups`` is documented (at lock creation) as guarded by
    ``_group_lock``, but the original check-and-insert ran unlocked, so two
    threads joining the same group concurrently could both pass the
    membership check and call ``group.add_member`` twice. The
    check-and-insert now runs under the lock.
    """
    with self._group_lock:
        if group.name not in self.joined_groups:
            self.joined_groups[group.name] = group
            group.add_member(self)
    return GroupMembership(self, group)
|
|
161
|
+
|
|
162
|
+
def talk_to_group(self, group: Group, content: str, mentions: Optional[List['BaseAgent']] = None, print_conversation: bool = False):
    """
    Broadcast *content* to every member of *group* in this agent's name.

    Args:
        group: Target group (the agent need not have joined it; no check is made here).
        content: Message text.
        mentions: Agents to @-mention; only their names are recorded on the message.
        print_conversation: Forwarded to ``group.broadcast`` for console echoing.

    Returns:
        self, for chaining.

    Fix: the original used a mutable default argument (``mentions=[]``),
    a shared-state hazard; ``None`` sentinel is used instead (behavior
    unchanged for all callers).
    """
    message = Message(
        sender=self.name,
        content=content,
        is_group=True,
        group_name=group.name,
        mentions=[m.name for m in (mentions or [])]
    )
    group.broadcast(message, print_conversation)
    return self
|
|
172
|
+
|
|
173
|
+
def choose_group(self, content: str) -> List[GroupMemberResponse]:
    """Use GroupUtils to pick, among the groups this agent has joined, those best matching *content*."""
    selector = GroupUtils(self.llm)
    return selector.auto_select_group(content, self.joined_groups.values())
|
|
177
|
+
|
|
178
|
+
def talk_to(self, other: Union['BaseAgent', Group], content: str, mentions: Optional[List['BaseAgent']] = None, print_conversation: bool = False):
    """
    Send a message to *other*. A Group target delegates to
    ``talk_to_group``; an agent target becomes a private (1:1) exchange:
    the message is recorded on both sides, the peer generates a
    ReplyDecision, and a "private" reply recurses back via ``other.talk_to``.

    Returns:
        self for chaining, or None when the peer chose to ignore the
        message (original behavior preserved).

    Fix: the original used a mutable default argument (``mentions=[]``);
    replaced with a ``None`` sentinel (behavior unchanged for all callers).
    """
    mentions = mentions or []
    if isinstance(other, Group):
        return self.talk_to_group(other, content, mentions, print_conversation)

    message = Message(
        sender=self.name,
        content=content,
        is_group=False,
        mentions=[m.name for m in mentions]
    )
    if print_conversation:
        print(f"[Private Chat] {self.name} -> {other.name}: {content}")

    # Record the message in both participants' private histories.
    self._add_private_message(other.name, message)
    other._add_private_message(self.name, message)

    response = other.generate_reply(message)

    if print_conversation:
        print(f">>> {other.name} reply to {self.name} with strategy: {response.strategy}, reason: {response.reason}")

    if response.strategy == "ignore":
        return
    elif response.strategy == "private":
        # Resolve mentioned agent names back to agent objects and reply.
        reply_mentions = [AgentHub.get_agent(m) for m in response.mentions]
        other.talk_to(other=self, content=response.content, mentions=reply_mentions, print_conversation=print_conversation)
    elif response.strategy == "broadcast":
        # Broadcasting makes no sense in a 1:1 chat; log and drop the reply.
        warning_msg = f"invalid strategy broadcast action in private chat:[{self.name}] 广播消息给群组 {other.name}: {response.content}"
        logger.warning(warning_msg)
        if print_conversation:
            print(f"[Private Chat Warning] {warning_msg}")
    return self
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
def _add_private_message(self, other_name: str, message: Message):
    """Append *message* to the 1:1 history kept under *other_name*, creating the list on first use (guarded by _chat_lock)."""
    with self._chat_lock:
        self.private_chats.setdefault(other_name, []).append(message)
|
|
218
|
+
|
|
219
|
+
def threadsafe_receive(self, message: Message,print_conversation: bool = False):
    """Deliver *message* to this agent.

    NOTE(review): despite the name, this body adds no locking of its own —
    it simply delegates to receive_message. Confirm whether callers rely on
    extra synchronization that should live here.
    """
    self.receive_message(message,print_conversation=print_conversation)
|
|
221
|
+
|
|
222
|
+
def receive_message(self, message: Message,print_conversation: bool = False):
    """
    Handle an incoming message.

    Group messages: echo to the console, generate a ReplyDecision, then
    dispatch on its strategy — "ignore" stops; "private" replies 1:1 to the
    sender via talk_to; "broadcast" re-broadcasts the reply to the group.
    Private messages are only echoed here; 1:1 replies are driven by
    talk_to on the sender's side.
    """
    if message.is_group:
        prefix = f"[Group {message.group_name}]"
        # Mark messages that explicitly @-mention this agent.
        if message.mentions and self.name in message.mentions:
            prefix += " @You"
        print(f"{prefix} {message.sender}: {message.content}")
        reply_decision = self.generate_reply(message)
        print(f">>> {self.name} reply to {message.sender} with strategy: {reply_decision.strategy}, reason: {reply_decision.reason}")
        if reply_decision.strategy == "ignore":
            return
        elif reply_decision.strategy == "private":
            self.talk_to(other=AgentHub.get_agent(message.sender), content=reply_decision.content,print_conversation=print_conversation)
        elif reply_decision.strategy == "broadcast":
            # NOTE(review): assumes this agent has joined message.group_name;
            # a message from an unjoined group would raise KeyError — confirm
            # the broadcaster guarantees membership.
            self.joined_groups[message.group_name].broadcast(Message(
                sender=self.name,
                content=reply_decision.content,
                is_group=True,
                group_name=message.group_name,
                mentions=reply_decision.mentions,
                priority=reply_decision.priority
            ))
    else:
        print(f"[Private] {message.sender}: {message.content}")
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def generate_reply(self, message: Message) -> ReplyDecision:
    """
    Run the agentic loop on *message* and parse the terminal event into a
    ReplyDecision.

    Fix: the original returned ``None`` when the event stream ended
    without a CompletionEvent or ErrorEvent, despite the ``ReplyDecision``
    annotation; callers (receive_message, talk_to) immediately access
    ``.strategy`` and would crash with AttributeError. A safe "ignore"
    decision is now returned in that case.
    """
    user_input = self._generate_reply.prompt(message)
    events = self.agentic_run(AgentRequest(user_input=user_input))
    for event in events:
        if isinstance(event, CompletionEvent):
            # Parse the completion result text into the ReplyDecision model.
            from byzerllm.utils.str2model import to_model
            return to_model(ReplyDecision, event.result)
        elif isinstance(event, ErrorEvent):
            logger.error(f"Error generating reply: {event.error}")
            return ReplyDecision(strategy="ignore", content="", reason="Error generating reply")
    # Defensive fallback: no terminal event was produced.
    return ReplyDecision(strategy="ignore", content="", reason="No completion event produced")
|
|
258
|
+
|
|
259
|
+
@byzerllm.prompt()
def _generate_reply(self, message: Message) -> str:
    """
    你的名字是 {{ name }}
    {% if message.is_group %}
    当前群组是 {{ message.group_name }}
    {% endif %}
    当前时间: {{ time }}
    {% if role %}
    你对自己的描述是:
    <who_are_you>
    {{ role }}
    </who_are_you>
    {% endif %}


    {{ message.sender }} 发送了一条{% if message.is_group %}群组消息{% else %}私聊消息{% endif %}:
    <message>
    {{ message.content }}
    </message>

    {% if message.mentions and message.mentions|length > 0 %}
    这条消息的发送者特别 @ 了用户:({{ message.mentions|join(',') }})。
    {% endif %}

    {% if message.is_group %}
    群组对话上下文:
    <group_message_history>
    {% for msg in group_message_history %}
    {% if msg.sender == name %}
    <role>你/You</role>: <msg>{{ msg.content }}</msg>
    {% else %}
    <role>{{ msg.sender }}</role>: <msg>{{ msg.content }}</msg>
    {% endif %}
    {% endfor %}
    </group_message_history>
    {% else %}
    私聊对话上下文:
    <private_message_history>
    {% for msg in private_message_history %}
    {% if msg.sender == name %}
    <role>你/You</role>: <msg>{{ msg.content }}</msg>
    {% else %}
    <role>{{ msg.sender }}</role>: <msg>{{ msg.content }}</msg>
    {% endif %}
    {% endfor %}
    </private_message_history>
    {% endif %}

    请根据上面内容进行解答,在最后请务必使用 attempt_completion 工具,确保里面的 result 字段包含如下 Json 格式内容:

    ```json
    {
        "content": "回复内容",
        "strategy": "broadcast|private|ignore",
        "mentions": ["被提及的agent名称"],
        "priority": 优先级 0-100,
        "reason": "选择策略的原因"
    }
    ```
    ignore 表示用户发送的消息可以不进行回复,private 我们要回复用户,并且只回复给发送信息的用户,broadcast 表示要回复消息,并且回复给群组所有成员。
    *** 注意,阅读上面的所有内容,尤其关注 {% if message.is_group %}群组上下文{% else %}私聊上下文{% endif %},判断是否使用 ignore 策略结束对话,避免无意义对话。一般在群组对话中,你没有被 @ 就无需回答,直接使用ignore策略结束对话。 ***
    {% if refuse_reply_reason %}
    当满足以下描述时,你应当拒绝回复:
    <when_to_refuse_reply>
    {{ refuse_reply_reason }}
    </when_to_refuse_reply>
    {% endif %}
    """
    # NOTE: the docstring above is the Jinja2 prompt template consumed by
    # @byzerllm.prompt() — it is runtime behavior, not documentation, and
    # must not be altered casually.
    # Build the variables the template references.
    context = {
        "name": self.name,
        "role": self.custom_system_prompt,
        "time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "refuse_reply_reason": self.refuse_reply_reason
    }
    if message.is_group:
        # Group chat: expose the group's shared history to the template.
        group = self.joined_groups[message.group_name]
        context["group_message_history"] = group.history
    else:
        # Private chat: expose the 1:1 history with the sender (may be empty).
        context["private_message_history"] = self.private_chats.get(message.sender, [])

    return context
|
|
341
|
+
|
|
342
|
+
def record_file_change(self, file_path: str, change_type: str, diff: Optional[str] = None, content: Optional[str] = None):
    """
    Record a single file change.

    Args:
        file_path: Path relative to the project root.
        change_type: 'added' or 'modified'.
        diff: Diff text (used by replace_in_file).
        content: Latest full file content (optional; typically write_to_file).
    """
    if file_path not in self.file_changes:
        # First change seen for this file.
        entry = FileChangeEntry(
            type=change_type, diffs=[], content=content)
        self.file_changes[file_path] = entry
    else:
        entry = self.file_changes[file_path]
        # 'added' takes priority: a file created in this session stays
        # marked as added even after later modifications.
        if entry.type != "added":
            entry.type = change_type

        # The latest content wins.
        if content is not None:
            entry.content = content

    if diff:
        entry.diffs.append(diff)
|
|
369
|
+
|
|
370
|
+
def _get_all_file_changes(self) -> Dict[str, FileChangeEntry]:
    """
    Return all recorded file changes.

    Returns:
        Dict keyed by file path with FileChangeEntry values. Note: this is
        the live internal dict, not a copy.
    """
    return self.file_changes
|
|
378
|
+
|
|
379
|
+
def _get_changed_files_from_shadow(self) -> List[str]:
    """
    Collect project-relative paths of all files present in the shadow
    directory (i.e. files the shadow system recorded as added/modified).

    Shadow files that do not map back to a project path are skipped.

    Returns:
        List of paths relative to args.source_dir.
    """
    changed: List[str] = []
    shadow_root = self.shadow_manager.shadows_dir
    for dirpath, _dirnames, filenames in os.walk(shadow_root):
        for filename in filenames:
            shadow_path = os.path.join(dirpath, filename)
            try:
                project_path = self.shadow_manager.from_shadow_path(shadow_path)
                rel_path = os.path.relpath(project_path, self.args.source_dir)
            except Exception:
                # No shadow -> project mapping; ignore this file.
                continue
            changed.append(rel_path)
    return changed
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
def _reconstruct_tool_xml(self, tool: BaseTool) -> str:
    """
    Reconstructs the XML representation of a tool call from its Pydantic model.

    Looks up the tool's tag in the ToolRegistry, then emits one child tag
    per non-None model field. Returns an <error> element (not an exception)
    when the tool type has no registered tag.
    """
    # Find the registry tag whose model class matches this tool instance.
    tool_tag = next(
        (tag for tag, model in ToolRegistry.get_tag_model_map().items() if isinstance(tool, model)), None)
    if not tool_tag:
        logger.error(
            f"Cannot find tag name for tool type {type(tool).__name__}")
        # Return a placeholder or raise? Let's return an error XML string.
        return f"<error>Could not find tag for tool {type(tool).__name__}</error>"

    xml_parts = [f"<{tool_tag}>"]
    for field_name, field_value in tool.model_dump(exclude_none=True).items():
        # Format value based on type, ensuring XML safety
        if isinstance(field_value, bool):
            # Booleans as lowercase true/false, matching the prompt format.
            value_str = str(field_value).lower()
        elif isinstance(field_value, (list, dict)):
            # Simple string representation for list/dict for now.
            # Consider JSON within the tag if needed and supported by the prompt/LLM.
            # Use JSON for structured data
            value_str = json.dumps(field_value, ensure_ascii=False)
        else:
            value_str = str(field_value)

        # Escape the value content
        escaped_value = xml.sax.saxutils.escape(value_str)

        # Handle multi-line content like 'content' or 'diff' - ensure newlines are preserved
        if '\n' in value_str:
            # Add newline before closing tag for readability if content spans multiple lines
            xml_parts.append(
                f"<{field_name}>\n{escaped_value}\n</{field_name}>")
        else:
            xml_parts.append(
                f"<{field_name}>{escaped_value}</{field_name}>")

    xml_parts.append(f"</{tool_tag}>")
    # Join with newline for readability, matching prompt examples
    return "\n".join(xml_parts)
|
|
443
|
+
|
|
444
|
+
@byzerllm.prompt()
|
|
445
|
+
def _system(self, request: AgentRequest) -> str:
|
|
446
|
+
"""
|
|
447
|
+
{{system_prompt}}
|
|
448
|
+
|
|
449
|
+
====
|
|
450
|
+
|
|
451
|
+
TOOL USE
|
|
452
|
+
|
|
453
|
+
You have access to a set of tools that are executed upon the user's approval. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
|
|
454
|
+
|
|
455
|
+
# Tool Use Formatting
|
|
456
|
+
|
|
457
|
+
Tool use is formatted using XML-style tags. The tool name is enclosed in opening and closing tags, and each parameter is similarly enclosed within its own set of tags. Here's the structure:
|
|
458
|
+
|
|
459
|
+
<tool_name>
|
|
460
|
+
<parameter1_name>value1</parameter1_name>
|
|
461
|
+
<parameter2_name>value2</parameter2_name>
|
|
462
|
+
...
|
|
463
|
+
</tool_name>
|
|
464
|
+
|
|
465
|
+
For example:
|
|
466
|
+
|
|
467
|
+
<read_file>
|
|
468
|
+
<path>src/main.js</path>
|
|
469
|
+
</read_file>
|
|
470
|
+
|
|
471
|
+
Always adhere to this format for the tool use to ensure proper parsing and execution.
|
|
472
|
+
|
|
473
|
+
|
|
474
|
+
# Tools
|
|
475
|
+
|
|
476
|
+
{% for tool_tag, tool_description in tool_descriptions.items() %}
|
|
477
|
+
## {{ tool_tag }}
|
|
478
|
+
{{ tool_description.description }}
|
|
479
|
+
{% endfor %}
|
|
480
|
+
|
|
481
|
+
{%if mcp_server_info %}
|
|
482
|
+
### MCP_SERVER_LIST
|
|
483
|
+
{{mcp_server_info}}
|
|
484
|
+
{%endif%}
|
|
485
|
+
|
|
486
|
+
{%if agent_info %}
|
|
487
|
+
### AVAILABLE_AGENTS
|
|
488
|
+
{{agent_info}}
|
|
489
|
+
{%endif%}
|
|
490
|
+
|
|
491
|
+
{%if group_info %}
|
|
492
|
+
### AVAILABLE_GROUPS
|
|
493
|
+
{{group_info}}
|
|
494
|
+
{%endif%}
|
|
495
|
+
|
|
496
|
+
# Tool Use Examples
|
|
497
|
+
{%- set example_count = 0 -%}
|
|
498
|
+
{%- for tool_tag, example in tool_examples.items() -%}
|
|
499
|
+
{%- if example -%}
|
|
500
|
+
{%- set example_count = example_count + 1 -%}
|
|
501
|
+
## Example {{ example_count }}: {{ example.title }}
|
|
502
|
+
{{ example.body }}
|
|
503
|
+
{%- endif -%}
|
|
504
|
+
{%- endfor -%}
|
|
505
|
+
|
|
506
|
+
# Tool Use Guidelines
|
|
507
|
+
|
|
508
|
+
1. In <thinking> tags, assess what information you already have and what information you need to proceed with the task.
|
|
509
|
+
2. Choose the most appropriate tool based on the task and the tool descriptions provided. Assess if you need additional information to proceed, and which of the available tools would be most effective for gathering this information. For example using the list_files tool is more effective than running a command like \`ls\` in the terminal. It's critical that you think about each available tool and use the one that best fits the current step in the task.
|
|
510
|
+
3. If multiple actions are needed, use one tool at a time per message to accomplish the task iteratively, with each tool use being informed by the result of the previous tool use. Do not assume the outcome of any tool use. Each step must be informed by the previous step's result.
|
|
511
|
+
4. Formulate your tool use using the XML format specified for each tool.
|
|
512
|
+
5. After each tool use, the user will respond with the result of that tool use. This result will provide you with the necessary information to continue your task or make further decisions. This response may include:
|
|
513
|
+
- Information about whether the tool succeeded or failed, along with any reasons for failure.
|
|
514
|
+
- Linter errors that may have arisen due to the changes you made, which you'll need to address.
|
|
515
|
+
- New terminal output in reaction to the changes, which you may need to consider or act upon.
|
|
516
|
+
- Any other relevant feedback or information related to the tool use.
|
|
517
|
+
6. ALWAYS wait for user confirmation after each tool use before proceeding. Never assume the success of a tool use without explicit confirmation of the result from the user.
|
|
518
|
+
{%- for tool_name, guideline in tool_guidelines.items() -%}
|
|
519
|
+
{{ loop.index + 6 }}. **{{ tool_name }}**: {{ guideline }}
|
|
520
|
+
{%- endfor -%}
|
|
521
|
+
|
|
522
|
+
It is crucial to proceed step-by-step, waiting for the user's message after each tool use before moving forward with the task. This approach allows you to:
|
|
523
|
+
1. Confirm the success of each step before proceeding.
|
|
524
|
+
2. Address any issues or errors that arise immediately.
|
|
525
|
+
3. Adapt your approach based on new information or unexpected results.
|
|
526
|
+
4. Ensure that each action builds correctly on the previous ones.
|
|
527
|
+
|
|
528
|
+
|
|
529
|
+
By waiting for and carefully considering the user's response after each tool use, you can react accordingly and make informed decisions about how to proceed with the task. This iterative process helps ensure the overall success and accuracy of your work.
|
|
530
|
+
|
|
531
|
+
{% for case_name, case_info in tool_case_docs.items() %}
|
|
532
|
+
|
|
533
|
+
====
|
|
534
|
+
|
|
535
|
+
# {{ case_name | upper }}
|
|
536
|
+
|
|
537
|
+
{{ case_info.doc }}
|
|
538
|
+
{% endfor %}
|
|
539
|
+
|
|
540
|
+
====
|
|
541
|
+
|
|
542
|
+
ACT MODE V.S. PLAN MODE
|
|
543
|
+
|
|
544
|
+
In each user message, the environment_details will specify the current mode. There are two modes:
|
|
545
|
+
|
|
546
|
+
- ACT MODE: In this mode, you have access to all tools EXCEPT the plan_mode_respond tool.
|
|
547
|
+
- In ACT MODE, you use tools to accomplish the user's task. Once you've completed the user's task, you use the attempt_completion tool to present the result of the task to the user.
|
|
548
|
+
- PLAN MODE: In this special mode, you have access to the plan_mode_respond tool.
|
|
549
|
+
- In PLAN MODE, the goal is to gather information and get context to create a detailed plan for accomplishing the task, which the user will review and approve before they switch you to ACT MODE to implement the solution.
|
|
550
|
+
- In PLAN MODE, when you need to converse with the user or present a plan, you should use the plan_mode_respond tool to deliver your response directly, rather than using <thinking> tags to analyze when to respond. Do not talk about using plan_mode_respond - just use it directly to share your thoughts and provide helpful answers.
|
|
551
|
+
|
|
552
|
+
## What is PLAN MODE?
|
|
553
|
+
|
|
554
|
+
- While you are usually in ACT MODE, the user may switch to PLAN MODE in order to have a back and forth with you to plan how to best accomplish the task.
|
|
555
|
+
- When starting in PLAN MODE, depending on the user's request, you may need to do some information gathering e.g. using read_file or search_files to get more context about the task. You may also ask the user clarifying questions to get a better understanding of the task. You may return mermaid diagrams to visually display your understanding.
|
|
556
|
+
- Once you've gained more context about the user's request, you should architect a detailed plan for how you will accomplish the task. Returning mermaid diagrams may be helpful here as well.
|
|
557
|
+
- Then you might ask the user if they are pleased with this plan, or if they would like to make any changes. Think of this as a brainstorming session where you can discuss the task and plan the best way to accomplish it.
|
|
558
|
+
- If at any point a mermaid diagram would make your plan clearer to help the user quickly see the structure, you are encouraged to include a Mermaid code block in the response. (Note: if you use colors in your mermaid diagrams, be sure to use high contrast colors so the text is readable.)
|
|
559
|
+
- Finally once it seems like you've reached a good plan, ask the user to switch you back to ACT MODE to implement the solution.
|
|
560
|
+
|
|
561
|
+
{% if enable_active_context_in_generate %}
|
|
562
|
+
====
|
|
563
|
+
|
|
564
|
+
PROJECT PACKAGE CONTEXT
|
|
565
|
+
|
|
566
|
+
Each directory can contain a short **`active.md`** summary file located under the mirrored path inside
|
|
567
|
+
`{{ current_project }}/.auto-coder/active-context/`.
|
|
568
|
+
|
|
569
|
+
* **Purpose** – captures only the files that have **recently changed** in that directory. It is *not* a full listing.
|
|
570
|
+
* **Example** – for `{{ current_project }}/src/abc/bbc`, the summary is
|
|
571
|
+
`{{ current_project }}/.auto-coder/active-context/src/abc/bbc/active.md`.
|
|
572
|
+
|
|
573
|
+
**Reading a summary**
|
|
574
|
+
|
|
575
|
+
```xml
|
|
576
|
+
<read_file>
|
|
577
|
+
<path>.auto-coder/active-context/src/abc/bbc/active.md</path>
|
|
578
|
+
</read_file>
|
|
579
|
+
```
|
|
580
|
+
|
|
581
|
+
Use these summaries to quickly decide which files deserve a deeper look with tools like
|
|
582
|
+
`read_file`, `search_files`, or `list_code_definition_names`.
|
|
583
|
+
|
|
584
|
+
{% endif %}
|
|
585
|
+
====
|
|
586
|
+
|
|
587
|
+
CAPABILITIES
|
|
588
|
+
|
|
589
|
+
- You are a powerful deep research RAG system, specialized in collecting and synthesizing information to answer complex user queries.
|
|
590
|
+
- You can execute commands, list files, perform regex searches, read file contents, ask follow-up questions, and more to help you thoroughly research any topic.
|
|
591
|
+
- When a user presents a task, a list of file paths in the current working directory ('{{ current_project }}') is included in environment_details, providing an overview of the project structure.
|
|
592
|
+
- You can use the search_files tool to perform regex searches in specified directories, with results including context. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need attention.
|
|
593
|
+
- You can use the execute_command tool to run commands on the user's computer to perform specific system operations. Commands will be executed in the current working directory.
|
|
594
|
+
- Remember, the write_to_file and replace_in_file tools are ONLY for creating and updating research plans, search strategies, or summarizing findings—never for modifying system files or operational code.
|
|
595
|
+
|
|
596
|
+
====
|
|
597
|
+
|
|
598
|
+
RULES
|
|
599
|
+
|
|
600
|
+
- Your current working directory is: {{current_project}}
|
|
601
|
+
- You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '{{ current_project }}', so be sure to pass in the correct 'path' parameter when using tools that require a path.
|
|
602
|
+
- Do not use the ~ character or $HOME to refer to the home directory.
|
|
603
|
+
- Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '{{ current_project }}', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '{{current_project}}'). For example, if you needed to run \`npm install\` in a project outside of '{{current_project}}', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`.
|
|
604
|
+
- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches.
|
|
605
|
+
- The write_to_file and replace_in_file tools are ONLY to be used for creating and updating research plans, search strategies, or summarizing findings. They are NOT to be used for modifying system files or any operational code.
|
|
606
|
+
- When making research plans, always consider the context and objectives clearly outlined by the user.
|
|
607
|
+
- Do not ask for more information than necessary. Use the tools provided to accomplish the user's request efficiently and effectively. When you've completed your task, you must use the attempt_completion tool to present the result to the user. The user may provide feedback, which you can use to make improvements and try again.
|
|
608
|
+
{% if enable_tool_ask_followup_question %}
|
|
609
|
+
- You are only allowed to ask the user questions using the ask_followup_question tool. Use this tool only when you need additional details to complete a task, and be sure to use a clear and concise question that will help you move forward with the task. However if you can use the available tools to avoid having to ask the user questions, you should do so. For example, if the user mentions a file that may be in an outside directory like the Desktop, you should use the list_files tool to list the files in the Desktop and check if the file they are talking about is there, rather than asking the user to provide the file path themselves.
|
|
610
|
+
- When executing commands, if you don't see the expected output, assume the terminal executed the command successfully and proceed with the task. The user's terminal may be unable to stream the output back properly. If you absolutely need to see the actual terminal output, use the ask_followup_question tool to request the user to copy and paste it back to you.
|
|
611
|
+
{% endif %}
|
|
612
|
+
- The user may provide a file's contents directly in their message, in which case you shouldn't use the read_file tool to get the file contents again since you already have it.
|
|
613
|
+
- Your goal is to try to accomplish the user's task, NOT engage in a back and forth conversation.
|
|
614
|
+
- NEVER end attempt_completion result with a question or request to engage in further conversation! Formulate the end of your result in a way that is final and does not require further input from the user.
|
|
615
|
+
- You are STRICTLY FORBIDDEN from starting your messages with "Great", "Certainly", "Okay", "Sure". You should NOT be conversational in your responses, but rather direct and to the point. For example you should NOT say "Great, I've updated the CSS" but instead something like "I've updated the research findings". It is important you be clear and technical in your messages.
|
|
616
|
+
- When presented with images, utilize your vision capabilities to thoroughly examine them and extract meaningful information. Incorporate these insights into your thought process as you accomplish the user's task.
|
|
617
|
+
- At the end of each user message, you will automatically receive environment_details. This information is not written by the user themselves, but is auto-generated to provide potentially relevant context about the project structure and environment. While this information can be valuable for understanding the project context, do not treat it as a direct part of the user's request or response. Use it to inform your actions and decisions, but don't assume the user is explicitly asking about or referring to this information unless they clearly do so in their message. When using environment_details, explain your actions clearly to ensure the user understands, as they may not be aware of these details.
|
|
618
|
+
- Before executing commands, check the "Actively Running Terminals" section in environment_details. If present, consider how these active processes might impact your task. For example, if a local development server is already running, you wouldn't need to start it again. If no active terminals are listed, proceed with command execution as normal.
|
|
619
|
+
- When using the replace_in_file tool, you must include complete lines in your SEARCH blocks, not partial lines. The system requires exact line matches and cannot match partial lines. For example, if you want to match a line containing "const x = 5;", your SEARCH block must include the entire line, not just "x = 5" or other fragments.
|
|
620
|
+
- When using the replace_in_file tool, if you use multiple SEARCH/REPLACE blocks, list them in the order they appear in the file. For example if you need to make changes to both line 10 and line 50, first include the SEARCH/REPLACE block for line 10, followed by the SEARCH/REPLACE block for line 50.
|
|
621
|
+
- It is critical you wait for the user's response after each tool use, in order to confirm the success of the tool use.
|
|
622
|
+
|
|
623
|
+
{% if extra_docs %}
|
|
624
|
+
====
|
|
625
|
+
|
|
626
|
+
RULES PROVIDED BY USER
|
|
627
|
+
|
|
628
|
+
The following rules are provided by the user, and you must follow them strictly.
|
|
629
|
+
|
|
630
|
+
{% for key, value in extra_docs.items() %}
|
|
631
|
+
<user_rule>
|
|
632
|
+
##File: {{ key }}
|
|
633
|
+
{{ value }}
|
|
634
|
+
</user_rule>
|
|
635
|
+
{% endfor %}
|
|
636
|
+
{% endif %}
|
|
637
|
+
|
|
638
|
+
====
|
|
639
|
+
|
|
640
|
+
SYSTEM INFORMATION
|
|
641
|
+
|
|
642
|
+
Operating System: {{os_distribution}}
|
|
643
|
+
Default Shell: {{shell_type}}
|
|
644
|
+
Home Directory: {{home_dir}}
|
|
645
|
+
Current Working Directory: {{current_project}}
|
|
646
|
+
|
|
647
|
+
====
|
|
648
|
+
|
|
649
|
+
OBJECTIVE
|
|
650
|
+
|
|
651
|
+
You accomplish a given task iteratively, breaking it down into clear steps and working through them methodically.
|
|
652
|
+
|
|
653
|
+
1. Analyze the user's query and identify clear, achievable research objectives. Prioritize these objectives in a logical order.
|
|
654
|
+
2. Sequentially use available tools to gather information, using only one tool at a time. Each objective should correspond to a distinct step in your problem-solving process.
|
|
655
|
+
3. Before using tools, analyze within <thinking></thinking> tags. First examine the file structure in environment_details for context, consider which tool is most relevant, then analyze whether you have enough information for each required parameter. If all required parameters exist or can be reasonably inferred, close the thinking tag and proceed with the tool. If a required parameter value is missing, don't call the tool but instead use ask_followup_question to request information from the user.
|
|
656
|
+
4. After completing the task, use the attempt_completion tool to present your results to the user.
|
|
657
|
+
5. The user may provide feedback for improvements. Avoid pointless back-and-forth conversations; don't end responses with questions or offers of further assistance.
|
|
658
|
+
|
|
659
|
+
{% if file_paths_str %}
|
|
660
|
+
====
|
|
661
|
+
The following are files that the user is currently focusing on.
|
|
662
|
+
Make sure you always start your analysis by using the read_file tool to get the content of the files.
|
|
663
|
+
<files>
|
|
664
|
+
{{file_paths_str}}
|
|
665
|
+
</files>
|
|
666
|
+
{% endif %}
|
|
667
|
+
"""
|
|
668
|
+
return self._render_context()
|
|
669
|
+
|
|
670
|
+
|
|
671
|
+
def _render_context(self):
    """Assemble the template-rendering context for the system prompt.

    Collects tool metadata from the registry, user rules, environment
    details (OS, shell, paths), the focused file list, and a textual
    roster of known agents and groups, returning them as one dict of
    template variables.
    """
    # Tool metadata pulled from the registry.
    tool_descriptions = ToolRegistry.get_all_tool_descriptions()
    tool_examples = ToolRegistry.get_all_tool_examples()
    tool_case_docs = ToolRegistry.get_all_tools_case_docs()
    tool_guidelines = ToolRegistry.get_all_tool_use_guidelines()

    # User-provided rule documents.
    extra_docs = get_rules()

    env_info = detect_env()

    # Detect the shell actually hosting this process; default to bash.
    if shells.is_running_in_cmd():
        shell_type = "cmd"
    elif shells.is_running_in_powershell():
        shell_type = "powershell"
    else:
        shell_type = "bash"

    file_paths_str = "\n".join(
        source.module_name for source in self.files.sources)

    # Build a human-readable roster of known agents, marking this agent
    # so the model does not address itself.
    agent_info = ""
    agent_names = AgentHub.list_agents()
    if agent_names:
        agent_parts = ["Available Agents:\n"]
        for agent_name in agent_names:
            peer = AgentHub.get_agent(agent_name)
            if not peer:
                continue
            role = getattr(peer, "custom_system_prompt", "No description")
            snippet = f"{role[:100]}{'...' if len(role) > 100 else ''}"
            if agent_name == self.name:
                agent_parts.append(
                    f"- {agent_name} (This is you, do not talk to yourself): {snippet}\n")
            else:
                agent_parts.append(f"- {agent_name}: {snippet}\n")
        agent_info = "".join(agent_parts)

    # Build a summary of agent groups; member lists are read under the
    # group's lock to avoid racing concurrent membership changes.
    group_info = ""
    groups = AgentHub.get_all_groups()
    if groups:
        group_parts = ["Available Groups:\n"]
        for grp in groups:
            with grp._members_lock:
                member_names = [m.name for m in grp.members]
            group_parts.append(
                f"- {grp.name}: {len(member_names)} members ({', '.join(member_names)})\n")
        group_info = "".join(group_parts)

    return {
        "conversation_history": self.conversation_history,
        "env_info": env_info,
        "shell_type": shell_type,
        "shell_encoding": shells.get_terminal_encoding(),
        "conversation_safe_zone_tokens": self.args.conversation_prune_safe_zone_tokens,
        "os_distribution": shells.get_os_distribution(),
        "current_user": shells.get_current_username(),
        "current_project": os.path.abspath(self.args.source_dir),
        "home_dir": os.path.expanduser("~"),
        "files": self.files.to_str(),
        "mcp_server_info": self.mcp_server_info,
        "agent_info": agent_info,
        "group_info": group_info,
        "enable_active_context_in_generate": self.args.enable_active_context_in_generate,
        "extra_docs": extra_docs,
        "file_paths_str": file_paths_str,
        "tool_descriptions": tool_descriptions,
        "tool_examples": tool_examples,
        "tool_case_docs": tool_case_docs,
        "tool_guidelines": tool_guidelines,
        "system_prompt": self.custom_system_prompt,
        "name": self.name
    }
|
|
738
|
+
|
|
739
|
+
def agentic_run(self, request: AgentRequest) -> Generator[Union[LLMOutputEvent, LLMThinkingEvent, ToolCallEvent, ToolResultEvent, CompletionEvent, ErrorEvent], None, None]:
    """
    Drive the agentic loop for a single user request.

    Analyzes the user request, interacts with the LLM, parses responses,
    executes tools, and yields structured events for visualization until
    completion or error.

    Args:
        request: The incoming agent request carrying the user's input.

    Yields:
        LLMOutputEvent / LLMThinkingEvent: streamed model text.
        ToolCallEvent / ToolResultEvent: tool-execution lifecycle events.
        CompletionEvent / PlanModeRespondEvent: terminal events.
        TokenUsageEvent / ErrorEvent: accounting and failure events.
    """
    logger.info(f"Starting analyze method with user input: {request.user_input[:50]}...")
    system_prompt = self._system.prompt(request)
    logger.info(f"Generated system prompt with length: {len(system_prompt)}")

    # print(system_prompt)
    # Reuse (and mutate in place) the instance-level conversation list so
    # self.agentic_conversations always reflects the current session.
    # BUGFIX: the previous code rebound the local name to a brand-new list
    # right after clear(), which left self.agentic_conversations
    # permanently empty while the loop grew an unrelated local list.
    self.agentic_conversations.clear()
    conversations = self.agentic_conversations
    conversations.append({"role": "system", "content": system_prompt})
    conversations.append({"role": "user", "content": request.user_input})

    logger.info(
        f"Initial conversation history size: {len(conversations)}")

    logger.info(f"Conversation history: {json.dumps(conversations, indent=2,ensure_ascii=False)}")

    iteration_count = 0
    while True:
        iteration_count += 1
        # BUGFIX: reset per cycle. The flag was previously initialized once
        # outside the loop and never cleared, so after the first tool call a
        # later tool-less response skipped both the assistant-buffer append
        # and the "use a tool or complete" reminder below, re-issuing an
        # identical LLM request indefinitely.
        tool_executed = False
        logger.info(f"Starting LLM interaction cycle #{iteration_count}")
        global_cancel.check_and_raise(token=self.args.event_file)

        # If the history already ends with an assistant message, treat that
        # message as the final answer instead of querying the LLM again.
        last_message = conversations[-1]
        if last_message["role"] == "assistant":
            logger.info(f"Last message is assistant, skipping LLM interaction cycle")
            yield CompletionEvent(completion=AttemptCompletionTool(
                result=last_message["content"],
                command=""
            ), completion_xml="")
            break
        logger.info(
            f"Starting LLM interaction cycle. History size: {len(conversations)}")

        assistant_buffer = ""
        logger.info("Initializing stream chat with LLM")

        # Issue the actual streaming request to the model.
        llm_response_gen = stream_chat_with_continue(
            llm=self.llm,
            conversations=conversations,
            llm_config={},  # Placeholder for future LLM configs
            args=self.args
        )

        logger.info("Starting to parse LLM response stream")
        parsed_events = self.stream_and_parse_llm_response(llm_response_gen)

        event_count = 0
        for event in parsed_events:
            event_count += 1
            logger.info(f"Processing event #{event_count}: {type(event).__name__}")
            global_cancel.check_and_raise(token=self.args.event_file)

            if isinstance(event, (LLMOutputEvent, LLMThinkingEvent)):
                # Accumulate free text so it can be committed to history
                # together with any tool call that follows it.
                assistant_buffer += event.text
                logger.debug(f"Accumulated {len(assistant_buffer)} chars in assistant buffer")
                yield event  # Yield text/thinking immediately for display

            elif isinstance(event, ToolCallEvent):
                tool_executed = True
                tool_obj = event.tool
                tool_name = type(tool_obj).__name__
                logger.info(f"Tool call detected: {tool_name}")
                tool_xml = event.tool_xml  # Already reconstructed by parser

                # Append assistant's thoughts and the tool call to history.
                logger.info(f"Adding assistant message with tool call to conversation history")
                conversations.append({
                    "role": "assistant",
                    "content": assistant_buffer + tool_xml
                })
                assistant_buffer = ""  # Reset buffer after tool call

                yield event  # Yield the ToolCallEvent for display
                logger.info("Yielded ToolCallEvent")

                # AttemptCompletion terminates the whole session.
                if isinstance(tool_obj, AttemptCompletionTool):
                    logger.info(
                        "AttemptCompletionTool received. Finalizing session.")
                    logger.info(f"Completion result: {tool_obj.result[:50]}...")
                    yield CompletionEvent(completion=tool_obj, completion_xml=tool_xml)
                    logger.info(
                        "AgenticEdit analyze loop finished due to AttemptCompletion.")
                    save_formatted_log(self.args.source_dir, json.dumps(conversations, ensure_ascii=False), "agentic_conversation")
                    return

                # PlanModeRespond also terminates the session.
                if isinstance(tool_obj, PlanModeRespondTool):
                    logger.info(
                        "PlanModeRespondTool received. Finalizing session.")
                    logger.info(f"Plan mode response: {tool_obj.response[:50]}...")
                    yield PlanModeRespondEvent(completion=tool_obj, completion_xml=tool_xml)
                    logger.info(
                        "AgenticEdit analyze loop finished due to PlanModeRespond.")
                    save_formatted_log(self.args.source_dir, json.dumps(conversations, ensure_ascii=False), "agentic_conversation")
                    return

                # Resolve and execute the tool.
                resolver_cls = ToolRegistry.get_resolver_for_tool(tool_obj)
                if not resolver_cls:
                    logger.error(
                        f"No resolver implemented for tool {type(tool_obj).__name__}")
                    tool_result = ToolResult(
                        success=False, message="Error: Tool resolver not implemented.", content=None)
                    result_event = ToolResultEvent(
                        tool_name=type(tool_obj).__name__, result=tool_result)
                    tool_result_xml = f"<tool_result tool_name='{type(tool_obj).__name__}' success='false'><message>Error: Tool resolver not implemented.</message><content></content></tool_result>"
                else:
                    try:
                        logger.info(f"Creating resolver for tool: {tool_name}")
                        resolver = resolver_cls(
                            agent=self, tool=tool_obj, args=self.args)
                        logger.info(
                            f"Executing tool: {type(tool_obj).__name__} with params: {tool_obj.model_dump()}")
                        tool_result: ToolResult = resolver.resolve()
                        logger.info(
                            f"Tool Result: Success={tool_result.success}, Message='{tool_result.message}'")
                        result_event = ToolResultEvent(
                            tool_name=type(tool_obj).__name__, result=tool_result)

                        # Serialize the result as XML for the conversation
                        # history; message/content are escaped so arbitrary
                        # tool output cannot break the XML structure.
                        logger.info("Preparing XML for conversation history")
                        escaped_message = xml.sax.saxutils.escape(
                            tool_result.message)
                        content_str = str(
                            tool_result.content) if tool_result.content is not None else ""
                        escaped_content = xml.sax.saxutils.escape(
                            content_str)
                        tool_result_xml = (
                            f"<tool_result tool_name='{type(tool_obj).__name__}' success='{str(tool_result.success).lower()}'>"
                            f"<message>{escaped_message}</message>"
                            f"<content>{escaped_content}</content>"
                            f"</tool_result>"
                        )
                    except Exception as e:
                        logger.exception(
                            f"Error resolving tool {type(tool_obj).__name__}: {e}")
                        error_message = f"Critical Error during tool execution: {e}"
                        tool_result = ToolResult(
                            success=False, message=error_message, content=None)
                        result_event = ToolResultEvent(
                            tool_name=type(tool_obj).__name__, result=tool_result)
                        escaped_error = xml.sax.saxutils.escape(
                            error_message)
                        tool_result_xml = f"<tool_result tool_name='{type(tool_obj).__name__}' success='false'><message>{escaped_error}</message><content></content></tool_result>"

                yield result_event  # Yield the ToolResultEvent for display
                logger.info("Yielded ToolResultEvent")

                # Feed the tool result back to the model as a user message.
                logger.info("Adding tool result to conversation history")
                conversations.append({
                    "role": "user",  # Simulating the user providing the tool result
                    "content": tool_result_xml
                })
                logger.info(
                    f"Added tool result to conversations for tool {type(tool_obj).__name__}")
                logger.info(f"Breaking LLM cycle after executing tool: {tool_name}")
                break  # After tool execution and result, break to start a new LLM cycle

            elif isinstance(event, ErrorEvent):
                logger.error(f"Error event occurred: {event.message}")
                yield event  # Pass through errors
                # Optionally stop the process on parsing errors
                # logger.error("Stopping analyze loop due to parsing error.")
                # return
            elif isinstance(event, TokenUsageEvent):
                logger.info("Yielding token usage event")
                yield event

        if not tool_executed:
            # No tool executed in this LLM response cycle.
            logger.info("LLM response finished without executing a tool.")
            # Commit any trailing assistant text that was not followed by a
            # tool call, merging with an existing trailing assistant message.
            if assistant_buffer:
                logger.info(f"Appending assistant buffer to history: {len(assistant_buffer)} chars")
                last_message = conversations[-1]
                if last_message["role"] != "assistant":
                    logger.info("Adding new assistant message")
                    conversations.append(
                        {"role": "assistant", "content": assistant_buffer})
                else:
                    logger.info("Appending to existing assistant message")
                    last_message["content"] += assistant_buffer

            # Nudge the LLM: it must either use a tool or explicitly finish
            # via attempt_completion rather than replying with bare text.
            logger.info("Adding system reminder to use tools or attempt completion")
            conversations.append({
                "role": "user",
                "content": "NOTE: You must use an appropriate tool (such as read_file, write_to_file, execute_command, etc.) or explicitly complete the task (using attempt_completion). Do not provide text responses without taking concrete actions. Please select a suitable tool to continue based on the user's task."
            })
            # Loop again so the LLM can reconsider, instead of breaking out.
            logger.info("Continuing the LLM interaction loop without breaking")
            continue

    logger.info(f"AgenticEdit analyze loop finished after {iteration_count} iterations.")
    save_formatted_log(self.args.source_dir, json.dumps(conversations, ensure_ascii=False), "agentic_conversation")
|
|
945
|
+
|
|
946
|
+
def stream_and_parse_llm_response(
    self, generator: Generator[Tuple[str, Any], None, None]
) -> Generator[Union[LLMOutputEvent, LLMThinkingEvent, ToolCallEvent, ErrorEvent], None, None]:
    """
    Streamingly parses the LLM response generator, distinguishing between
    plain text, thinking blocks, and tool usage blocks, yielding corresponding Event models.

    Operates as a small incremental state machine over a growing text buffer
    with three states: plain text (default), inside a <thinking> block, and
    inside a recognized tool tag. Also yields one TokenUsageEvent (built from
    the last metadata seen on the stream) after the generator is exhausted.

    Args:
        generator: An iterator yielding (content, metadata) tuples from the LLM stream.

    Yields:
        Union[LLMOutputEvent, LLMThinkingEvent, ToolCallEvent, ErrorEvent]: Events representing
        different parts of the LLM's response.
    """
    buffer = ""                  # accumulated, not-yet-emitted stream text
    in_tool_block = False        # True while between <tool_tag> and </tool_tag>
    in_thinking_block = False    # True while between <thinking> and </thinking>
    current_tool_tag = None      # tag name of the tool block currently open
    tool_start_pattern = re.compile(
        r"<([a-zA-Z0-9_]+)>")  # Matches tool tags
    thinking_start_tag = "<thinking>"
    thinking_end_tag = "</thinking>"

    def parse_tool_xml(tool_xml: str, tool_tag: str) -> Optional[BaseTool]:
        """Minimal parser for tool XML string.

        Extracts <param>value</param> pairs from inside <tool_tag>...</tool_tag>,
        applies a few per-tool type coercions (bools, JSON option lists), and
        instantiates the registered tool model. Returns None on any failure.
        """
        params = {}
        try:
            # Find content between <tool_tag> and </tool_tag>
            inner_xml_match = re.search(
                rf"<{tool_tag}>(.*?)</{tool_tag}>", tool_xml, re.DOTALL)
            if not inner_xml_match:
                logger.error(
                    f"Could not find content within <{tool_tag}>...</{tool_tag}>")
                return None
            inner_xml = inner_xml_match.group(1).strip()

            # Find <param>value</param> pairs within the inner content.
            # The \1 backreference requires matching open/close tag names.
            pattern = re.compile(r"<([a-zA-Z0-9_]+)>(.*?)</\1>", re.DOTALL)
            for m in pattern.finditer(inner_xml):
                key = m.group(1)
                # Basic unescaping (might need more robust unescaping if complex values are used)
                val = xml.sax.saxutils.unescape(m.group(2))
                params[key] = val

            tool_cls = ToolRegistry.get_model_for_tag(tool_tag)
            if tool_cls:
                # Attempt to handle boolean conversion specifically for requires_approval
                if 'requires_approval' in params:
                    params['requires_approval'] = params['requires_approval'].lower(
                    ) == 'true'
                # Attempt to handle JSON parsing for ask_followup_question_tool
                if tool_tag == 'ask_followup_question' and 'options' in params:
                    try:
                        params['options'] = json.loads(
                            params['options'])
                    except json.JSONDecodeError:
                        logger.warning(
                            f"Could not decode JSON options for ask_followup_question_tool: {params['options']}")
                        # Keep as string or handle error? Let's keep as string for now.
                        pass
                if tool_tag == 'plan_mode_respond' and 'options' in params:
                    try:
                        params['options'] = json.loads(
                            params['options'])
                    except json.JSONDecodeError:
                        logger.warning(
                            f"Could not decode JSON options for plan_mode_respond_tool: {params['options']}")
                # Handle recursive for list_files
                if tool_tag == 'list_files' and 'recursive' in params:
                    params['recursive'] = params['recursive'].lower() == 'true'
                return tool_cls(**params)
            else:
                logger.error(f"Tool class not found for tag: {tool_tag}")
                return None
        except Exception as e:
            logger.exception(
                f"Failed to parse tool XML for <{tool_tag}>: {e}\nXML:\n{tool_xml}")
            return None

    # Holds the latest stream metadata so token usage can be reported once
    # the stream finishes.
    meta_holder = byzerllm.MetaHolder()
    for content_chunk, metadata in generator:
        global_cancel.check_and_raise(token=self.args.event_file)
        meta_holder.meta = metadata
        if not content_chunk:
            continue
        buffer += content_chunk

        # Drain as many complete events as the current buffer allows; break
        # out (to read more stream data) when the buffer ends mid-construct.
        while True:
            # Check for transitions: thinking -> text, tool -> text, text -> thinking, text -> tool
            # NOTE(review): next_event_pos is assigned but never read.
            next_event_pos = len(buffer)
            found_event = False

            # 1. Check for </thinking> if inside thinking block
            if in_thinking_block:
                end_think_pos = buffer.find(thinking_end_tag)
                if end_think_pos != -1:
                    thinking_content = buffer[:end_think_pos]
                    yield LLMThinkingEvent(text=thinking_content)
                    buffer = buffer[end_think_pos + len(thinking_end_tag):]
                    in_thinking_block = False
                    found_event = True
                    continue  # Restart loop with updated buffer/state
                else:
                    # Need more data to close thinking block
                    break

            # 2. Check for </tool_tag> if inside tool block
            elif in_tool_block:
                end_tag = f"</{current_tool_tag}>"
                end_tool_pos = buffer.find(end_tag)
                if end_tool_pos != -1:
                    tool_block_end_index = end_tool_pos + len(end_tag)
                    tool_xml = buffer[:tool_block_end_index]
                    tool_obj = parse_tool_xml(tool_xml, current_tool_tag)

                    if tool_obj:
                        # Reconstruct the XML accurately here AFTER successful parsing
                        # This ensures the XML yielded matches what was parsed.
                        reconstructed_xml = self._reconstruct_tool_xml(
                            tool_obj)
                        # _reconstruct_tool_xml signals failure by returning
                        # a string that starts with "<error>".
                        if reconstructed_xml.startswith("<error>"):
                            yield ErrorEvent(message=f"Failed to reconstruct XML for tool {current_tool_tag}")
                        else:
                            yield ToolCallEvent(tool=tool_obj, tool_xml=reconstructed_xml)
                    else:
                        yield ErrorEvent(message=f"Failed to parse tool: <{current_tool_tag}>")
                        # Optionally yield the raw XML as plain text?
                        # yield LLMOutputEvent(text=tool_xml)

                    buffer = buffer[tool_block_end_index:]
                    in_tool_block = False
                    current_tool_tag = None
                    found_event = True
                    continue  # Restart loop
                else:
                    # Need more data to close tool block
                    break

            # 3. Check for <thinking> or <tool_tag> if in plain text state
            else:
                start_think_pos = buffer.find(thinking_start_tag)
                tool_match = tool_start_pattern.search(buffer)
                start_tool_pos = tool_match.start() if tool_match else -1
                tool_name = tool_match.group(1) if tool_match else None

                # Determine which tag comes first (if any)
                first_tag_pos = -1
                is_thinking = False
                is_tool = False

                if start_think_pos != -1 and (start_tool_pos == -1 or start_think_pos < start_tool_pos):
                    first_tag_pos = start_think_pos
                    is_thinking = True
                elif start_tool_pos != -1 and (start_think_pos == -1 or start_tool_pos < start_think_pos):
                    # Check if it's a known tool
                    if tool_name in ToolRegistry.get_tag_model_map():
                        first_tag_pos = start_tool_pos
                        is_tool = True
                    else:
                        # Unknown tag, treat as text for now, let buffer grow
                        pass

                if first_tag_pos != -1:  # Found either <thinking> or a known <tool>
                    # Yield preceding text if any
                    preceding_text = buffer[:first_tag_pos]
                    if preceding_text:
                        yield LLMOutputEvent(text=preceding_text)

                    # Transition state
                    if is_thinking:
                        # The <thinking> open tag itself is consumed.
                        buffer = buffer[first_tag_pos +
                                        len(thinking_start_tag):]
                        in_thinking_block = True
                    elif is_tool:
                        # Keep the starting tag: parse_tool_xml needs the
                        # full <tag>...</tag> span later.
                        buffer = buffer[first_tag_pos:]
                        in_tool_block = True
                        current_tool_tag = tool_name

                    found_event = True
                    continue  # Restart loop

                else:
                    # No tags found, or only unknown tags found. Need more data or end of stream.
                    # Yield text chunk but keep some buffer for potential tag start
                    # Keep last 100 chars so a tag split across chunk
                    # boundaries can still be recognized.
                    split_point = max(0, len(buffer) - 100)
                    text_to_yield = buffer[:split_point]
                    if text_to_yield:
                        yield LLMOutputEvent(text=text_to_yield)
                    buffer = buffer[split_point:]
                    break  # Need more data

            # If no event was processed in this iteration, break inner loop
            # NOTE(review): every branch above ends in continue or break, so
            # this guard appears unreachable; kept for safety.
            if not found_event:
                break

    # Report token usage from the last metadata observed on the stream.
    yield TokenUsageEvent(usage=meta_holder.meta)

    # After generator exhausted, yield any remaining content
    if in_thinking_block:
        # Unterminated thinking block
        yield ErrorEvent(message="Stream ended with unterminated <thinking> block.")
        if buffer:
            # Yield remaining as thinking
            yield LLMThinkingEvent(text=buffer)
    elif in_tool_block:
        # Unterminated tool block
        yield ErrorEvent(message=f"Stream ended with unterminated <{current_tool_tag}> block.")
        if buffer:
            yield LLMOutputEvent(text=buffer)  # Yield remaining as text
    elif buffer:
        # Yield remaining plain text
        yield LLMOutputEvent(text=buffer)
|
|
1160
|
+
|
|
1161
|
+
def run_with_events(self, request: AgentRequest):
    """
    Run the agent and translate its internal events into the standard
    event-system format, writing them through the event manager.

    Args:
        request: The agent request to process.

    Side effects:
        Writes stream/result/completion/error records to the event manager
        bound to ``self.args.event_file``; applies pending file changes on
        ``CompletionEvent``. Re-raises any unexpected exception after
        recording a fatal-error event.
    """
    event_manager = get_event_manager(self.args.event_file)
    base_url = "/agent/base"

    try:
        event_stream = self.analyze(request)
        for agent_event in event_stream:
            # Fresh metadata per event; branches override path/streaming.
            metadata = EventMetadata(
                action_file=self.args.file,
                is_streaming=False,
                stream_out_type=base_url)

            if isinstance(agent_event, LLMThinkingEvent):
                content = EventContentCreator.create_stream_thinking(
                    content=agent_event.text)
                metadata.is_streaming = True
                metadata.path = f"{base_url}/thinking"
                event_manager.write_stream(
                    content=content.to_dict(), metadata=metadata.to_dict())
            elif isinstance(agent_event, LLMOutputEvent):
                content = EventContentCreator.create_stream_content(
                    content=agent_event.text)
                metadata.is_streaming = True
                metadata.path = f"{base_url}/output"
                event_manager.write_stream(content=content.to_dict(),
                                           metadata=metadata.to_dict())
            elif isinstance(agent_event, ToolCallEvent):
                tool_name = type(agent_event.tool).__name__
                metadata.path = f"{base_url}/tool/call"
                content = EventContentCreator.create_result(
                    content={
                        "tool_name": tool_name,
                        **agent_event.tool.model_dump()
                    },
                    metadata={}
                )
                event_manager.write_result(
                    content=content.to_dict(), metadata=metadata.to_dict())
            elif isinstance(agent_event, ToolResultEvent):
                metadata.path = f"{base_url}/tool/result"
                content = EventContentCreator.create_result(
                    content={
                        "tool_name": agent_event.tool_name,
                        **agent_event.result.model_dump()
                    },
                    metadata={}
                )
                event_manager.write_result(
                    content=content.to_dict(), metadata=metadata.to_dict())
            elif isinstance(agent_event, TokenUsageEvent):
                # Look up model pricing so cost can be computed.
                model_name = ",".join(llms_utils.get_llm_names(self.llm))
                model_info = llms_utils.get_model_info(
                    model_name, self.args.product_mode) or {}
                # model_info is always a dict here (``or {}``), so a plain
                # .get with default is sufficient.
                input_price = model_info.get("input_price", 0.0)
                output_price = model_info.get("output_price", 0.0)

                # Prices are quoted per million tokens.
                last_meta = agent_event.usage
                input_cost = (last_meta.input_tokens_count * input_price) / 1000000
                output_cost = (last_meta.generated_tokens_count * output_price) / 1000000

                logger.info(f"Token Usage Details: Model={model_name}, Input Tokens={last_meta.input_tokens_count}, Output Tokens={last_meta.generated_tokens_count}, Input Cost=${input_cost:.6f}, Output Cost=${output_cost:.6f}")

                # Record the token-statistics event (reuse the manager bound
                # above instead of resolving it again).
                event_manager.write_result(
                    EventContentCreator.create_result(content=EventContentCreator.ResultTokenStatContent(
                        model_name=model_name,
                        elapsed_time=0.0,
                        first_token_time=last_meta.first_token_time,
                        input_tokens=last_meta.input_tokens_count,
                        output_tokens=last_meta.generated_tokens_count,
                        input_cost=input_cost,
                        output_cost=output_cost
                    ).to_dict()), metadata=metadata.to_dict())

            elif isinstance(agent_event, CompletionEvent):
                # Perform the actual merge of tracked changes here; a merge
                # failure is logged but does not abort completion reporting.
                try:
                    self.apply_changes()
                except Exception as e:
                    logger.exception(f"Error merging shadow changes to project: {e}")

                metadata.path = f"{base_url}/completion"
                content = EventContentCreator.create_completion(
                    success_code="AGENT_COMPLETE",
                    success_message="Agent attempted task completion.",
                    result={
                        "response": agent_event.completion.result
                    }
                )
                event_manager.write_completion(
                    content=content.to_dict(), metadata=metadata.to_dict())
            elif isinstance(agent_event, ErrorEvent):
                metadata.path = f"{base_url}/error"
                content = EventContentCreator.create_error(
                    error_code="AGENT_ERROR",
                    error_message=agent_event.message,
                    details={"agent_event_type": "ErrorEvent"}
                )
                event_manager.write_error(
                    content=content.to_dict(), metadata=metadata.to_dict())
            else:
                # Unknown event type: surface it as an error event so it is
                # not silently dropped.
                metadata.path = f"{base_url}/error"
                logger.warning(f"未处理的代理事件类型: {type(agent_event)}")
                content = EventContentCreator.create_error(
                    error_code="AGENT_ERROR",
                    error_message=f"未处理的代理事件类型: {type(agent_event)}",
                    details={"agent_event_type": type(agent_event).__name__}
                )
                event_manager.write_error(
                    content=content.to_dict(), metadata=metadata.to_dict())

    except Exception as e:
        logger.exception("代理执行过程中发生意外错误:")
        metadata = EventMetadata(
            action_file=self.args.file,
            is_streaming=False,
            stream_out_type=f"{base_url}/error")
        error_content = EventContentCreator.create_error(
            error_code="AGENT_FATAL_ERROR",
            error_message=f"发生意外错误: {str(e)}",
            details={"exception_type": type(e).__name__}
        )
        event_manager.write_error(
            content=error_content.to_dict(), metadata=metadata.to_dict())
        # Bare raise preserves the original traceback.
        raise
def run_in_terminal(self, request: AgentRequest):
    """
    Run the agent in a terminal session, streaming the interaction with Rich.

    Args:
        request: The agent request to process.

    Side effects:
        Prints panels/rules to the console, applies pending file changes on
        ``CompletionEvent``, and always prints an aggregate token/cost
        summary in the ``finally`` block. Re-raises unexpected exceptions.
    """
    # Loop-invariant imports hoisted out of the event loop (the original
    # re-imported these on every matching event).
    import json
    from rich.syntax import Syntax
    from rich.markdown import Markdown
    from autocoder.utils import llms as llm_utils

    start_time = time.time()
    console = Console()
    project_name = os.path.basename(os.path.abspath(self.args.source_dir))
    console.rule(agentic_lang.get_message_with_format("agent_start", project_name=project_name))
    console.print(Panel(
        agentic_lang.get_message_with_format("user_input", input=request.user_input),
        title=agentic_lang.get_message("user_input_title"),
        border_style="blue"))

    # Accumulators for the final token/cost summary.
    total_input_tokens = 0
    total_output_tokens = 0
    total_input_cost = 0.0
    total_output_cost = 0.0
    model_name = ""

    try:
        event_stream = self.agentic_run(request)
        for event in event_stream:
            if isinstance(event, TokenUsageEvent):
                # Look up model pricing (quoted per million tokens).
                model_name = ",".join(llm_utils.get_llm_names(self.llm))
                model_info = llm_utils.get_model_info(
                    model_name, self.args.product_mode) or {}
                input_price = model_info.get("input_price", 0.0)
                output_price = model_info.get("output_price", 0.0)

                last_meta = event.usage
                input_cost = (last_meta.input_tokens_count * input_price) / 1000000
                output_cost = (last_meta.generated_tokens_count * output_price) / 1000000

                # Accumulate usage for the final summary.
                total_input_tokens += last_meta.input_tokens_count
                total_output_tokens += last_meta.generated_tokens_count
                total_input_cost += input_cost
                total_output_cost += output_cost

                logger.info(agentic_lang.get_message_with_format(
                    "token_usage_log",
                    model=model_name,
                    input_tokens=last_meta.input_tokens_count,
                    output_tokens=last_meta.generated_tokens_count,
                    input_cost=input_cost,
                    output_cost=output_cost))

            elif isinstance(event, LLMThinkingEvent):
                # Render thinking output in a muted style.
                console.print(f"[grey50]{event.text}[/grey50]", end="")
            elif isinstance(event, LLMOutputEvent):
                console.print(event.text, end="")
            elif isinstance(event, ToolCallEvent):
                # Heuristic skip for the completion tool's call — assumes only
                # AttemptCompletionTool carries both attrs; TODO confirm.
                if hasattr(event.tool, "result") and hasattr(event.tool, "command"):
                    continue

                tool_name = type(event.tool).__name__
                display_content = get_tool_display_message(event.tool)
                console.print(Panel(
                    display_content,
                    title=agentic_lang.get_message_with_format("tool_operation_title", tool_name=tool_name),
                    border_style="blue",
                    title_align="left"))

            elif isinstance(event, ToolResultEvent):
                # Completion-tool results are rendered by CompletionEvent.
                if event.tool_name == "AttemptCompletionTool":
                    continue

                result = event.result
                title = agentic_lang.get_message_with_format("tool_result_success_title", tool_name=event.tool_name) if result.success else agentic_lang.get_message_with_format("tool_result_failure_title", tool_name=event.tool_name)
                border_style = "green" if result.success else "red"
                success_status = agentic_lang.get_message("success_status") if result.success else agentic_lang.get_message("failure_status")
                base_content = agentic_lang.get_message_with_format("status", status=success_status) + "\n"
                base_content += agentic_lang.get_message_with_format("message", message=result.message) + "\n"

                def _format_content(content):
                    # Truncate long content to head/tail excerpts.
                    if len(content) > 200:
                        return f"{content[:100]}\n...\n{content[-100:]}"
                    else:
                        return content

                panel_content = [base_content]
                syntax_content = None

                if result.content is not None:
                    content_str = ""
                    try:
                        if isinstance(result.content, (dict, list)):
                            content_str = json.dumps(
                                result.content, indent=2, ensure_ascii=False)
                            syntax_content = Syntax(
                                content_str, "json", theme="default", line_numbers=False)
                        elif isinstance(result.content, str) and ('\n' in result.content or result.content.strip().startswith('<')):
                            # Heuristic: multi-line or markup-like → highlight.
                            lexer = "text"
                            if event.tool_name == "ReadFileTool" and isinstance(event.result.message, str):
                                # Guess the lexer from a file extension in the
                                # message. ".json" is checked before ".js" so
                                # JSON files are not misclassified as
                                # JavaScript (bug in the original chain).
                                for ext, candidate in (
                                        (".py", "python"),
                                        (".json", "json"),
                                        (".js", "javascript"),
                                        (".ts", "typescript"),
                                        (".html", "html"),
                                        (".css", "css"),
                                        (".xml", "xml"),
                                        (".md", "markdown")):
                                    if ext in event.result.message:
                                        lexer = candidate
                                        break
                            elif event.tool_name == "ExecuteCommandTool":
                                lexer = "shell"

                            syntax_content = Syntax(
                                _format_content(result.content), lexer, theme="default", line_numbers=True)
                        else:
                            content_str = str(result.content)
                            panel_content.append(
                                _format_content(content_str))
                    except Exception as e:
                        logger.warning(agentic_lang.get_message_with_format("format_tool_error", error=str(e)))
                        # Fallback: plain stringification.
                        panel_content.append(
                            _format_content(str(result.content)))

                # Basic info panel first, then the highlighted content.
                console.print(Panel("\n".join(
                    panel_content), title=title, border_style=border_style, title_align="left"))
                if syntax_content:
                    console.print(syntax_content)

            elif isinstance(event, CompletionEvent):
                # Perform the actual merge of tracked changes here.
                try:
                    self.apply_changes()
                except Exception as e:
                    logger.exception(agentic_lang.get_message_with_format("shadow_merge_error", error=str(e)))

                console.print(Panel(Markdown(event.completion.result),
                                    title=agentic_lang.get_message("completion_title"),
                                    border_style="green", title_align="left"))
                if event.completion.command:
                    console.print(agentic_lang.get_message_with_format("suggested_command", command=event.completion.command))
            elif isinstance(event, ErrorEvent):
                console.print(Panel(
                    agentic_lang.get_message_with_format("error_content", message=event.message),
                    title=agentic_lang.get_message("error_title"),
                    border_style="red", title_align="left"))

            time.sleep(0.1)  # small delay for a smoother visual stream

    except Exception as e:
        logger.exception(agentic_lang.get_message("unexpected_error"))
        console.print(Panel(
            agentic_lang.get_message_with_format("fatal_error_content", error=str(e)),
            title=agentic_lang.get_message("fatal_error_title"),
            border_style="red"))
        # Bare raise preserves the original traceback.
        raise
    finally:
        # Always print the accumulated token usage at the end.
        duration = time.time() - start_time
        self.printer.print_in_terminal(
            "code_generation_complete",
            duration=duration,
            input_tokens=total_input_tokens,
            output_tokens=total_output_tokens,
            input_cost=total_input_cost,
            output_cost=total_output_cost,
            speed=0.0,
            model_names=model_name,
            sampling_count=1
        )
        console.rule(agentic_lang.get_message("agent_execution_complete"))
def apply_pre_changes(self):
    """
    Record a pre-change git commit before the agent starts editing.

    Writes an event noting the pre-change snapshot, then commits the
    current working tree as ``auto_coder_pre_<action file name>``.
    Skipped entirely when ``skip_commit`` is set; a git failure is
    reported to the terminal instead of raising.
    """
    if self.args.skip_commit:
        return

    action_basename = os.path.basename(self.args.file)
    try:
        pre_change_metadata = EventMetadata(
            action_file=self.args.file,
            is_streaming=False,
            path="/agent/edit/apply_pre_changes",
            stream_out_type="/agent/edit")
        pre_change_content = EventContentCreator.create_result(
            content=self.printer.get_message_from_key("/agent/edit/apply_pre_changes"))
        get_event_manager(self.args.event_file).write_result(
            pre_change_content, metadata=pre_change_metadata.to_dict())
        git_utils.commit_changes(
            self.args.source_dir, f"auto_coder_pre_{action_basename}")
    except Exception as e:
        # Most likely the source dir is not a git repository yet.
        self.printer.print_in_terminal("git_init_required",
                                       source_dir=self.args.source_dir, error=str(e))
        return
def apply_changes(self):
    """
    Apply all tracked file changes to the original project directory.

    Writes each tracked change to disk, then (unless ``skip_commit``)
    commits the result, records the changed files in the action YAML,
    optionally kicks off active-context processing, and prints commit
    info. Git failures are reported to the terminal instead of raising.
    """
    # Hoisted: the original called self.get_all_file_changes() twice.
    all_changes = self.get_all_file_changes()

    for file_path, change in all_changes.items():
        # Ensure the directory exists before writing the file.
        dir_path = os.path.dirname(file_path)
        if dir_path:  # empty for files in the project root
            os.makedirs(dir_path, exist_ok=True)

        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(change.content)

    if len(all_changes) > 0:
        if not self.args.skip_commit:
            try:
                file_name = os.path.basename(self.args.file)
                commit_result = git_utils.commit_changes(
                    self.args.source_dir,
                    f"{self.args.query}\nauto_coder_{file_name}",
                )

                get_event_manager(self.args.event_file).write_result(
                    EventContentCreator.create_result(
                        content=self.printer.get_message_from_key("/agent/edit/apply_changes")), metadata=EventMetadata(
                        action_file=self.args.file,
                        is_streaming=False,
                        stream_out_type="/agent/edit").to_dict())
                action_yml_file_manager = ActionYmlFileManager(
                    self.args.source_dir)
                action_file_name = os.path.basename(self.args.file)
                # (Removed a stray no-op `commit_result.changed_files`
                # expression statement from the original.)
                add_updated_urls = [
                    os.path.join(self.args.source_dir, file)
                    for file in commit_result.changed_files
                ]

                self.args.add_updated_urls = add_updated_urls
                update_yaml_success = action_yml_file_manager.update_yaml_field(
                    action_file_name, "add_updated_urls", add_updated_urls)
                if not update_yaml_success:
                    self.printer.print_in_terminal(
                        "yaml_save_error", style="red", yaml_file=action_file_name)

                if self.args.enable_active_context:
                    # Fire-and-forget background task; only the id is shown.
                    active_context_manager = ActiveContextManager(
                        self.llm, self.args.source_dir)
                    task_id = active_context_manager.process_changes(
                        self.args)
                    self.printer.print_in_terminal("active_context_background_task",
                                                   style="blue",
                                                   task_id=task_id)
                git_utils.print_commit_info(commit_result=commit_result)
            except Exception as e:
                self.printer.print_str_in_terminal(
                    self.git_require_msg(
                        source_dir=self.args.source_dir, error=str(e)),
                    style="red"
                )
    else:
        self.printer.print_in_terminal("no_changes_made")