auto-coder 0.1.337__py3-none-any.whl → 0.1.340__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of auto-coder might be problematic.

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: auto-coder
- Version: 0.1.337
+ Version: 0.1.340
  Summary: AutoCoder: AutoCoder
  Author: allwefantasy
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -4,7 +4,7 @@ autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,
  autocoder/auto_coder_rag.py,sha256=NesRm7sIJrRQL1xxm_lbMtM7gi-KrYv9f26RfBuloZE,35386
  autocoder/auto_coder_rag_client_mcp.py,sha256=QRxUbjc6A8UmDMQ8lXgZkjgqtq3lgKYeatJbDY6rSo0,6270
  autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
- autocoder/auto_coder_runner.py,sha256=MMppqdwfT1a-NoIxqbQGNS2z2Wi0CWY6X3AK8morCsE,111424
+ autocoder/auto_coder_runner.py,sha256=-6s6wLoiedOjcUPP-658jovP08frJ4csJk3QP8ssvGo,111781
  autocoder/auto_coder_server.py,sha256=bLORGEclcVdbBVfM140JCI8WtdrU0jbgqdJIVVupiEU,20578
  autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
  autocoder/chat_auto_coder.py,sha256=CthuvdjVjTQOVv-zREsl8OCsZHPSP9OQcIgHULrW2Ro,25842
@@ -14,7 +14,7 @@ autocoder/command_parser.py,sha256=fx1g9E6GaM273lGTcJqaFQ-hoksS_Ik2glBMnVltPCE,1
  autocoder/lang.py,sha256=PFtATuOhHRnfpqHQkXr6p4C893JvpsgwTMif3l-GEi0,14321
  autocoder/models.py,sha256=_SCar82QIeBFTZZBdM2jPS6atKVhHnvE0gX3V0CsxD4,11590
  autocoder/run_context.py,sha256=IUfSO6_gp2Wt1blFWAmOpN0b0nDrTTk4LmtCYUBIoro,1643
- autocoder/version.py,sha256=_LNwUtBnPaBerh9KZiByo60wMmCJ6961eqCjdcRR4Zk,23
+ autocoder/version.py,sha256=WiDDh2vIU0y8fMFsb94eqiIbTnaKCPuN5_jXPXD2gQA,23
  autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autocoder/agent/agentic_edit.py,sha256=XsfePZ-t6M-uBSdG1VLZXk1goqXk2HPeJ_A8IYyBuWQ,58896
  autocoder/agent/agentic_edit_types.py,sha256=oFcDd_cxJ2yH9Ed1uTpD3BipudgoIEWDMPb5pAkq4gI,3288
@@ -54,7 +54,7 @@ autocoder/common/action_yml_file_manager.py,sha256=DdF5P1R_B_chCnnqoA2IgogakWLZk
  autocoder/common/anything2images.py,sha256=0ILBbWzY02M-CiWB-vzuomb_J1hVdxRcenAfIrAXq9M,25283
  autocoder/common/anything2img.py,sha256=iZQmg8srXlD7N5uGl5b_ONKJMBjYoW8kPmokkG6ISF0,10118
  autocoder/common/audio.py,sha256=Kn9nWKQddWnUrAz0a_ZUgjcu4VUU_IcZBigT7n3N3qc,7439
- autocoder/common/auto_coder_lang.py,sha256=cNwzXknWDDFUoG68D0xhp1KgTC25bS8pbObeL-1pRpA,40656
+ autocoder/common/auto_coder_lang.py,sha256=ozoGTy4ZFn3YsO5zWhvAGCu54mK4LtnRfC2yCvrMc_8,42462
  autocoder/common/auto_configure.py,sha256=D4N-fl9v8bKM5-Ds-uhkC2uGDmHH_ZjLJ759F8KXMKs,13129
  autocoder/common/buildin_tokenizer.py,sha256=L7d5t39ZFvUd6EoMPXUhYK1toD0FHlRH1jtjKRGokWU,1236
  autocoder/common/chunk_validation.py,sha256=BrR_ZWavW8IANuueEE7hS8NFAwEvm8TX34WnPx_1hs8,3030
@@ -126,10 +126,10 @@ autocoder/common/v2/code_editblock_manager.py,sha256=G0CIuV9Ki0FqMLnpA8nBT4pnkCN
  autocoder/common/v2/code_manager.py,sha256=C403bS-f6urixwitlKHcml-J03hci-UyNwHJOqBiY6Q,9182
  autocoder/common/v2/code_strict_diff_manager.py,sha256=v-J1kDyLg7tLGg_6_lbO9S4fNkx7M_L8Xr2G7fPptiU,9347
  autocoder/common/v2/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autocoder/common/v2/agent/agentic_edit.py,sha256=iFr2Umr1qXYGH0enzn2KJS8H2nqfuKkyBidbUKq38Bc,87031
- autocoder/common/v2/agent/agentic_edit_conversation.py,sha256=XtN0D1stP80gKnGiVxFMZL1DbIU56AISyDkS52RMaac,4434
- autocoder/common/v2/agent/agentic_edit_types.py,sha256=ZCgIu0Dj4xPP9s-lWtzh1-wBvoXrSkgu3pan_Oo_Ng0,4433
- autocoder/common/v2/agent/agentic_tool_display.py,sha256=5KshKQX0YFcGobfbJIwylwkalAEuZVUO68hSrIlPM64,7341
+ autocoder/common/v2/agent/agentic_edit.py,sha256=e35KhV0hTz973RcF98qylVFCaBoWtC0EufmEZgHrHD4,91123
+ autocoder/common/v2/agent/agentic_edit_conversation.py,sha256=qLLhTegH619JQTp3s1bj5FVn2hAcoV-DlhGO3UyIOMc,7338
+ autocoder/common/v2/agent/agentic_edit_types.py,sha256=6qBLLmvdlcsbzrpMHsYQVIHqbOWubMXOnmkqTs1pBWQ,4629
+ autocoder/common/v2/agent/agentic_tool_display.py,sha256=WKirt-2V346KLnbHgH3NVJiK3xvriD9oaCWj2IdvzLU,7309
  autocoder/common/v2/agent/agentic_edit_tools/__init__.py,sha256=wGICCc1dYh07osB21j62zOQ9Ws0PyyOQ12UYRHmHrtI,1229
  autocoder/common/v2/agent/agentic_edit_tools/ask_followup_question_tool_resolver.py,sha256=pjrukXjWXMIfUAUzoHzr7j2Onf1L7bxmjsUR1gGaFoA,2809
  autocoder/common/v2/agent/agentic_edit_tools/attempt_completion_tool_resolver.py,sha256=82ZGKeRBSDKeead_XVBW4FxpiE-5dS7tBOk_3RZ6B5s,1511
@@ -139,7 +139,7 @@ autocoder/common/v2/agent/agentic_edit_tools/list_code_definition_names_tool_res
  autocoder/common/v2/agent/agentic_edit_tools/list_files_tool_resolver.py,sha256=ERM5E7s2azQ8vcvogan4A_LZci8Pmhmxw1uQaNQhon4,5469
  autocoder/common/v2/agent/agentic_edit_tools/plan_mode_respond_tool_resolver.py,sha256=SZwFUxK6d2BaKWqQXi_c3IVe2iffviF6VUXJA9T9sx0,1492
  autocoder/common/v2/agent/agentic_edit_tools/read_file_tool_resolver.py,sha256=9Bh0KVbL0qiIqwChlb77biiBiETQ3zekxGe5Fj7hXAg,2800
- autocoder/common/v2/agent/agentic_edit_tools/replace_in_file_tool_resolver.py,sha256=tTQpCIGIzh1XO_MZm6wefMvUm_h6cKoa--oPIm-VwXM,7342
+ autocoder/common/v2/agent/agentic_edit_tools/replace_in_file_tool_resolver.py,sha256=lpD4fCbVR8GTrynqXON69IjM94nPy3nuUL62Ashm5O4,7988
  autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py,sha256=K-TcqY0z7nDupMkTRDAJdqW3z2Y_RUM_wUb-pOEVQRI,6044
  autocoder/common/v2/agent/agentic_edit_tools/use_mcp_tool_resolver.py,sha256=wM2Xy4bcnD0TSLEmcM8rvvyyWenN5_KQnJMO6hJ8lTE,1716
  autocoder/common/v2/agent/agentic_edit_tools/write_to_file_tool_resolver.py,sha256=UO4SrkDek3WDlRdlHH022W1roSNMdMcipJqDxRBlheM,3044
@@ -276,9 +276,9 @@ autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=KW0mlmcHlStXi8-_6fXZ2-ifeJ5mgP0OV7DQFzCtIsw,14008
  autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- auto_coder-0.1.337.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- auto_coder-0.1.337.dist-info/METADATA,sha256=GH7LgmlLQFdtYtvXWvRcDzUoY5toRt_Bmf0XX8Mcghg,2747
- auto_coder-0.1.337.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
- auto_coder-0.1.337.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
- auto_coder-0.1.337.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
- auto_coder-0.1.337.dist-info/RECORD,,
+ auto_coder-0.1.340.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ auto_coder-0.1.340.dist-info/METADATA,sha256=Dx2t8Mft1eVAJ3lW0jMZIZtetFSpBZduT8Pa1xxXGjs,2747
+ auto_coder-0.1.340.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+ auto_coder-0.1.340.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+ auto_coder-0.1.340.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+ auto_coder-0.1.340.dist-info/RECORD,,

autocoder/auto_coder_runner.py CHANGED
@@ -2821,8 +2821,9 @@ def generate_new_yaml(query: str):
  def auto_command(query: str,extra_args: Dict[str,Any]={}):
  """处理/auto指令"""
  args = get_final_config()
- memory = get_memory()
- if args.enable_agentic_edit:
+ memory = get_memory()
+ if args.enable_agentic_edit:
+ from autocoder.run_context import get_run_context,RunMode
  execute_file,args = generate_new_yaml(query)
  args.file = execute_file
  current_files = memory.get("current_files",{}).get("files",[])
@@ -2831,14 +2832,20 @@ def auto_command(query: str,extra_args: Dict[str,Any]={}):
  with open(file,"r",encoding="utf-8") as f:
  sources.append(SourceCode(module_name=file,source_code=f.read()))

- llm = get_single_llm(args.code_model or args.model,product_mode=args.product_mode)
- agent = AgenticEdit(llm=llm,args=args,files=SourceCodeList(sources=sources),
- conversation_history=[],
- memory_config=MemoryConfig(memory=memory,
- save_memory_func=save_memory), command_config=CommandConfig)
+ llm = get_single_llm(args.code_model or args.model,product_mode=args.product_mode)
+ conversation_history = extra_args.get("conversations",[])
+ agent = AgenticEdit(llm=llm,args=args,files=SourceCodeList(sources=sources),
+ conversation_history=conversation_history,
+ memory_config=MemoryConfig(memory=memory,
+ save_memory_func=save_memory), command_config=CommandConfig,
+ conversation_name="current"
+ )
+ if get_run_context() == RunMode.TERMINAL:
  agent.run_in_terminal(AgenticEditRequest(user_input=query))
- return
-
+ else:
+ agent.run_with_events(AgenticEditRequest(user_input=query))
+ return
+
  args = get_final_config()
  # 准备请求参数
  request = AutoCommandRequest(
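
Note: the hunks above show auto_command threading prior chat turns in through extra_args["conversations"] and then choosing between terminal and event-driven execution based on the run context. A minimal, self-contained sketch of that dispatch pattern is below; the RunMode values and the stub agent are stand-ins, not the package's code.

```python
# Sketch of the new dispatch in auto_command. Only the names RunMode, get_run_context,
# run_in_terminal and run_with_events come from the diff; everything else is a stub.
from enum import Enum
from typing import Any, Dict, List, Optional


class RunMode(Enum):
    TERMINAL = "terminal"   # assumed member values; stand-in for autocoder.run_context.RunMode
    WEB = "web"


def get_run_context() -> RunMode:
    # Stand-in for autocoder.run_context.get_run_context().
    return RunMode.TERMINAL


class StubAgent:
    """Hypothetical stand-in for AgenticEdit, showing only the two entry points used."""

    def __init__(self, conversation_history: List[Dict[str, Any]], conversation_name: str = "current"):
        self.conversation_history = conversation_history
        self.conversation_name = conversation_name

    def run_in_terminal(self, user_input: str) -> str:
        return f"[terminal] {user_input}"

    def run_with_events(self, user_input: str) -> str:
        return f"[events] {user_input}"


def auto_command_sketch(query: str, extra_args: Optional[Dict[str, Any]] = None) -> str:
    extra_args = extra_args or {}
    # Prior chat turns can now be passed in via extra_args["conversations"].
    conversation_history = extra_args.get("conversations", [])
    agent = StubAgent(conversation_history, conversation_name="current")
    # Terminal sessions render rich output; other run contexts emit events instead.
    if get_run_context() == RunMode.TERMINAL:
        return agent.run_in_terminal(query)
    return agent.run_with_events(query)


if __name__ == "__main__":
    print(auto_command_sketch("add a README", {"conversations": [{"role": "user", "content": "hi"}]}))
```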

autocoder/common/auto_coder_lang.py CHANGED
@@ -837,6 +837,42 @@ MESSAGES = {
  }


+ # 新增 ReplaceInFileToolResolver 国际化消息
+ MESSAGES.update({
+ "replace_in_file.access_denied": {
+ "en": "Error: Access denied. Attempted to modify file outside the project directory: {{file_path}}",
+ "zh": "错误:拒绝访问。尝试修改项目目录之外的文件:{{file_path}}"
+ },
+ "replace_in_file.file_not_found": {
+ "en": "Error: File not found at path: {{file_path}}",
+ "zh": "错误:未找到文件路径:{{file_path}}"
+ },
+ "replace_in_file.read_error": {
+ "en": "An error occurred while reading the file for replacement: {{error}}",
+ "zh": "读取待替换文件时发生错误:{{error}}"
+ },
+ "replace_in_file.no_valid_blocks": {
+ "en": "Error: No valid SEARCH/REPLACE blocks found in the provided diff.",
+ "zh": "错误:在提供的diff中未找到有效的SEARCH/REPLACE代码块。"
+ },
+ "replace_in_file.apply_failed": {
+ "en": "Failed to apply any changes. Errors:\n{{errors}}",
+ "zh": "未能应用任何更改。错误信息:\n{{errors}}"
+ },
+ "replace_in_file.apply_success": {
+ "en": "Successfully applied {{applied}}/{{total}} changes to file: {{file_path}}.",
+ "zh": "成功应用了 {{applied}}/{{total}} 个更改到文件:{{file_path}}。"
+ },
+ "replace_in_file.apply_success_with_warnings": {
+ "en": "Successfully applied {{applied}}/{{total}} changes to file: {{file_path}}.\nWarnings:\n{{errors}}",
+ "zh": "成功应用了 {{applied}}/{{total}} 个更改到文件:{{file_path}}。\n警告信息:\n{{errors}}"
+ },
+ "replace_in_file.write_error": {
+ "en": "An error occurred while writing the modified file: {{error}}",
+ "zh": "写入修改后的文件时发生错误:{{error}}"
+ }
+ })
+
  def get_system_language():
  try:
  return locale.getdefaultlocale()[0][:2]
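
Note: the new replace_in_file.* entries use {{name}} placeholders in both the en and zh strings. A small illustrative formatter for such templates is sketched below; the package's actual get_message_with_format may resolve the language and substitute values differently, and MESSAGES here is only a two-entry excerpt of the dictionary added above.

```python
# Sketch only: how the {{placeholder}} messages could be rendered.
import re

MESSAGES = {
    "replace_in_file.file_not_found": {
        "en": "Error: File not found at path: {{file_path}}",
        "zh": "错误:未找到文件路径:{{file_path}}",
    },
    "replace_in_file.apply_success": {
        "en": "Successfully applied {{applied}}/{{total}} changes to file: {{file_path}}.",
        "zh": "成功应用了 {{applied}}/{{total}} 个更改到文件:{{file_path}}。",
    },
}


def get_message_with_format_sketch(key: str, lang: str = "en", **kwargs) -> str:
    template = MESSAGES[key][lang]
    # Replace each {{name}} placeholder with the matching keyword argument,
    # leaving unknown placeholders untouched.
    return re.sub(r"\{\{(\w+)\}\}", lambda m: str(kwargs.get(m.group(1), m.group(0))), template)


if __name__ == "__main__":
    print(get_message_with_format_sketch("replace_in_file.apply_success",
                                         applied=2, total=3, file_path="src/app.py"))
    # -> Successfully applied 2/3 changes to file: src/app.py.
```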

autocoder/common/v2/agent/agentic_edit.py CHANGED
@@ -1,3 +1,4 @@
+ from autocoder.common.v2.agent.agentic_edit_conversation import AgenticConversation
  from enum import Enum
  from enum import Enum
  import json
@@ -10,6 +11,7 @@ from autocoder.common.printer import Printer
  from rich.console import Console
  from rich.panel import Panel
  from pydantic import SkipValidation
+ from byzerllm.utils.types import SingleOutputMeta

  # Removed ResultManager, stream_out, git_utils, AutoCommandTools, count_tokens, global_cancel, ActionYmlFileManager, get_event_manager, EventContentCreator, get_run_context, AgenticFilterStreamOutType
  from autocoder.common import AutoCoderArgs, git_utils, SourceCodeList, SourceCode
@@ -64,7 +66,7 @@ from autocoder.common.v2.agent.agentic_edit_types import (AgenticEditRequest, To
  TOOL_MODEL_MAP,
  # Event Types
  LLMOutputEvent, LLMThinkingEvent, ToolCallEvent,
- ToolResultEvent, CompletionEvent, ErrorEvent,TokenUsageEvent,
+ ToolResultEvent, CompletionEvent, PlanModeRespondEvent, ErrorEvent, TokenUsageEvent,
  # Import specific tool types for display mapping
  ReadFileTool, WriteToFileTool, ReplaceInFileTool, ExecuteCommandTool,
  ListFilesTool, SearchFilesTool, ListCodeDefinitionNamesTool,
@@ -87,6 +89,7 @@ TOOL_RESOLVER_MAP: Dict[Type[BaseTool], Type[BaseToolResolver]] = {
  UseMcpTool: UseMcpToolResolver,
  }

+
  # --- Tool Display Customization is now handled by agentic_tool_display.py ---


@@ -99,6 +102,7 @@ class AgenticEdit:
  args: AutoCoderArgs,
  memory_config: MemoryConfig,
  command_config: Optional[CommandConfig] = None,
+ conversation_name: str = "current"
  ):
  self.llm = llm
  self.args = args
@@ -113,27 +117,32 @@ class AgenticEdit:
  self.project_type_analyzer = ProjectTypeAnalyzer(
  args=args, llm=self.llm)

+ self.conversation_manager = AgenticConversation(
+ args, self.conversation_history, conversation_name=conversation_name)
+ # 当前不开启历史记录,所以清空
+ self.conversation_manager.clear_history()
+
  self.shadow_manager = ShadowManager(
  args.source_dir, args.event_file, args.ignore_clean_shadows)
  self.shadow_linter = ShadowLinter(self.shadow_manager, verbose=False)
  self.shadow_compiler = ShadowCompiler(
- self.shadow_manager, verbose=False)
-
+ self.shadow_manager, verbose=False)
+
  self.mcp_server_info = ""
- # try:
- # self.mcp_server = get_mcp_server()
- # mcp_server_info_response = self.mcp_server.send_request(
- # McpServerInfoRequest(
- # model=args.inference_model or args.model,
- # product_mode=args.product_mode,
- # )
- # )
- # self.mcp_server_info = mcp_server_info_response.result
- # except Exception as e:
- # logger.error(f"Error getting MCP server info: {str(e)}")
+ try:
+ self.mcp_server = get_mcp_server()
+ mcp_server_info_response = self.mcp_server.send_request(
+ McpServerInfoRequest(
+ model=args.inference_model or args.model,
+ product_mode=args.product_mode,
+ )
+ )
+ self.mcp_server_info = mcp_server_info_response.result
+ except Exception as e:
+ logger.error(f"Error getting MCP server info: {str(e)}")

  # 变更跟踪信息
- # 格式: { file_path: FileChangeEntry(...) }
+ # 格式: { file_path: FileChangeEntry(...) }
  self.file_changes: Dict[str, FileChangeEntry] = {}

  def record_file_change(self, file_path: str, change_type: str, diff: Optional[str] = None, content: Optional[str] = None):
@@ -148,7 +157,8 @@ class AgenticEdit:
  """
  entry = self.file_changes.get(file_path)
  if entry is None:
- entry = FileChangeEntry(type=change_type, diffs=[], content=content)
+ entry = FileChangeEntry(
+ type=change_type, diffs=[], content=content)
  self.file_changes[file_path] = entry
  else:
  # 文件已经存在,可能之前是 added,现在又被 modified,或者多次 modified
@@ -185,8 +195,10 @@ class AgenticEdit:
  for fname in files:
  shadow_file_path = os.path.join(root, fname)
  try:
- project_file_path = self.shadow_manager.from_shadow_path(shadow_file_path)
- rel_path = os.path.relpath(project_file_path, self.args.source_dir)
+ project_file_path = self.shadow_manager.from_shadow_path(
+ shadow_file_path)
+ rel_path = os.path.relpath(
+ project_file_path, self.args.source_dir)
  changed_files.append(rel_path)
  except Exception:
  # 非映射关系,忽略
@@ -377,7 +389,7 @@ class AgenticEdit:
  </options>
  </plan_mode_respond>

- ## MCP_TOOL
+ ## mcp_tool
  Description: Request to execute a tool via the Model Context Protocol (MCP) server. Use this when you need to execute a tool that is not natively supported by the agentic edit tools.
  Parameters:
  - server_name: (optional) The name of the MCP server to use. If not provided, the tool will automatically choose the best server based on the query.
@@ -391,7 +403,7 @@ class AgenticEdit:
  Your query here
  </query>
  </use_mcp_tool>
-
+
  {%if mcp_server_info %}
  ### MCP_SERVER_LIST
  {{mcp_server_info}}
@@ -463,24 +475,13 @@ class AgenticEdit:
  </diff>
  </replace_in_file>

- ## Example 4: Another example of using an MCP tool (where the server name is a unique identifier such as a URL)
+ ## Example 4: Another example of using an MCP tool (where the server name is a unique identifier listed in MCP_SERVER_LIST)

  <use_mcp_tool>
- <server_name>github.com/modelcontextprotocol/servers/tree/main/src/github</server_name>
+ <server_name>github</server_name>
  <tool_name>create_issue</tool_name>
- <arguments>
- {
- "owner": "octocat",
- "repo": "hello-world",
- "title": "Found a bug",
- "body": "I'm having a problem with this.",
- "labels": ["bug", "help wanted"],
- "assignees": ["octocat"]
- }
- </arguments>
- </use_mcp_tool>`
- : ""
- }
+ <query>ower is octocat, repo is hello-world, title is Found a bug, body is I'm having a problem with this. labels is "bug" and "help wanted",assignees is "octocat"</query>
+ </use_mcp_tool>

  # Tool Use Guidelines

@@ -660,12 +661,12 @@ class AgenticEdit:
  4. Once you've completed the user's task, you must use the attempt_completion tool to present the result of the task to the user. You may also provide a CLI command to showcase the result of your task; this can be particularly useful for web development tasks, where you can run e.g. \`open index.html\` to show the website you've built.
  5. The user may provide feedback, which you can use to make improvements and try again. But DO NOT continue in pointless back and forth conversations, i.e. don't end your responses with questions or offers for further assistance.

- {{ enable_active_context }}
+ {% if enable_active_context %}
  **Very Important Notice**
  Each directory has a description file stored separately. For example, the description for the directory `{{ current_project }}/src/abc/bbc` can be found in the file `{{ current_project }}/.auto-coder/active-context/src/abc/bbc/active.md`.
  You can use the tool `read_file` to read these description files, which helps you decide exactly which files need detailed attention. Note that the `active.md` file does not contain information about all files within the directory—it only includes information
  about the files that were recently changed.
- {{ enable_active_context }}
+ {% endif %}
  """
  env_info = detect_env()
  shell_type = "bash"
@@ -735,31 +736,36 @@ class AgenticEdit:
  Analyzes the user request, interacts with the LLM, parses responses,
  executes tools, and yields structured events for visualization until completion or error.
  """
- system_prompt = self._analyze.prompt(request)
+ system_prompt = self._analyze.prompt(request)
+ # print(system_prompt)
  conversations = [
  {"role": "system", "content": system_prompt},
- {"role": "user", "content": request.user_input}
- ]
+ ] + self.conversation_manager.get_history()
+ conversations.append({
+ "role": "user", "content": request.user_input
+ })
+ self.conversation_manager.add_user_message(request.user_input)
  logger.debug(
  f"Initial conversation history size: {len(conversations)}")
-
+
+ tool_executed = False
  while True:
  global_cancel.check_and_raise()
  logger.info(
  f"Starting LLM interaction cycle. History size: {len(conversations)}")
- tool_executed = False
- assistant_buffer = ""

+ assistant_buffer = ""
+
  llm_response_gen = stream_chat_with_continue(
  llm=self.llm,
  conversations=conversations,
  llm_config={}, # Placeholder for future LLM configs
  args=self.args
  )
-
+
  meta_holder = byzerllm.MetaHolder()
  parsed_events = self.stream_and_parse_llm_response(
- llm_response_gen,meta_holder)
+ llm_response_gen, meta_holder)

  for event in parsed_events:
  global_cancel.check_and_raise()
@@ -777,6 +783,8 @@ class AgenticEdit:
  "role": "assistant",
  "content": assistant_buffer + tool_xml
  })
+ self.conversation_manager.add_assistant_message(
+ assistant_buffer + tool_xml)
  assistant_buffer = "" # Reset buffer after tool call

  yield event # Yield the ToolCallEvent for display
@@ -790,6 +798,14 @@ class AgenticEdit:
  "AgenticEdit analyze loop finished due to AttemptCompletion.")
  return

+ if isinstance(tool_obj, PlanModeRespondTool):
+ logger.info(
+ "PlanModeRespondTool received. Finalizing session.")
+ yield PlanModeRespondEvent(completion=tool_obj, completion_xml=tool_xml)
+ logger.info(
+ "AgenticEdit analyze loop finished due to PlanModeRespond.")
+ return
+
  # Resolve the tool
  resolver_cls = TOOL_RESOLVER_MAP.get(type(tool_obj))
  if not resolver_cls:
@@ -844,6 +860,7 @@ class AgenticEdit:
  "role": "user", # Simulating the user providing the tool result
  "content": error_xml
  })
+ self.conversation_manager.add_user_message(error_xml)
  logger.debug(
  f"Added tool result to conversations for tool {type(tool_obj).__name__}")
  break # After tool execution and result, break to start a new LLM cycle
@@ -862,6 +879,8 @@ class AgenticEdit:
  if assistant_buffer:
  conversations.append(
  {"role": "assistant", "content": assistant_buffer})
+ self.conversation_manager.add_assistant_message(
+ assistant_buffer)
  # If the loop ends without AttemptCompletion, it means the LLM finished talking
  # without signaling completion. We might just stop or yield a final message.
  # Let's assume it stops here.
@@ -870,7 +889,7 @@ class AgenticEdit:
  logger.info("AgenticEdit analyze loop finished.")

  def stream_and_parse_llm_response(
- self, generator: Generator[Tuple[str, Any], None, None],meta_holder: byzerllm.MetaHolder
+ self, generator: Generator[Tuple[str, Any], None, None], meta_holder: byzerllm.MetaHolder
  ) -> Generator[Union[LLMOutputEvent, LLMThinkingEvent, ToolCallEvent, ErrorEvent], None, None]:
  """
  Streamingly parses the LLM response generator, distinguishing between
@@ -920,19 +939,18 @@ class AgenticEdit:
  params['requires_approval'] = params['requires_approval'].lower(
  ) == 'true'
  # Attempt to handle JSON parsing for arguments in use_mcp_tool
- if tool_tag == 'use_mcp_tool' and 'arguments' in params:
- try:
- params['arguments'] = json.loads(
- params['arguments'])
- except json.JSONDecodeError:
- logger.warning(
- f"Could not decode JSON arguments for use_mcp_tool: {params['arguments']}")
- # Keep as string or handle error? Let's keep as string for now.
- pass
+ # if tool_tag == 'use_mcp_tool' and 'query' in params:
+ # try:
+ # params['arguments'] = json.loads(
+ # params['arguments'])
+ # except json.JSONDecodeError:
+ # logger.warning(
+ # f"Could not decode JSON arguments for use_mcp_tool: {params['arguments']}")
+ # # Keep as string or handle error? Let's keep as string for now.
+ # pass
  # Handle recursive for list_files
  if tool_tag == 'list_files' and 'recursive' in params:
- params['recursive'] = params['recursive'].lower() == 'true'
-
+ params['recursive'] = params['recursive'].lower() == 'true'
  return tool_cls(**params)
  else:
  logger.error(f"Tool class not found for tag: {tool_tag}")
@@ -970,7 +988,7 @@ class AgenticEdit:
  break

  # 2. Check for </tool_tag> if inside tool block
- elif in_tool_block:
+ elif in_tool_block:
  end_tag = f"</{current_tool_tag}>"
  end_tool_pos = buffer.find(end_tag)
  if end_tool_pos != -1:
@@ -1081,17 +1099,17 @@ class AgenticEdit:
  Runs the agentic edit process, converting internal events to the
  standard event system format and writing them using the event manager.
  """
- event_manager = get_event_manager(self.args.event_file)
+ event_manager = get_event_manager(self.args.event_file)

  try:
  event_stream = self.analyze(request)
  for agent_event in event_stream:
  content = None
  metadata = EventMetadata(
- action_file=self.args.event_file,
- is_streaming=False,
+ action_file=self.args.event_file,
+ is_streaming=False,
  stream_out_type="/agent/edit")
-
+
  if isinstance(agent_event, LLMThinkingEvent):
  content = EventContentCreator.create_stream_thinking(
  content=agent_event.text)
@@ -1128,23 +1146,62 @@ class AgenticEdit:
  metadata={}
  )
  event_manager.write_result(
- content=content.to_dict(), metadata=metadata)
+ content=content.to_dict(), metadata=metadata.to_dict())
+ elif isinstance(agent_event, PlanModeRespondEvent):
+ metadata.path = "/agent/edit/plan_mode_respond"
+ content = EventContentCreator.create_markdown_result(
+ content=agent_event.completion.response,
+ metadata={}
+ )
+ event_manager.write_result(
+ content=content.to_dict(), metadata=metadata.to_dict())
+
+ elif isinstance(agent_event, TokenUsageEvent):
+ last_meta: SingleOutputMeta = agent_event.usage
+ # Get model info for pricing
+ from autocoder.utils import llms as llm_utils
+ model_name = ",".join(llm_utils.get_llm_names(self.llm))
+ model_info = llm_utils.get_model_info(
+ model_name, self.args.product_mode) or {}
+ input_price = model_info.get(
+ "input_price", 0.0) if model_info else 0.0
+ output_price = model_info.get(
+ "output_price", 0.0) if model_info else 0.0
+
+ # Calculate costs
+ input_cost = (last_meta.input_tokens_count *
+ input_price) / 1000000 # Convert to millions
+ # Convert to millions
+ output_cost = (
+ last_meta.generated_tokens_count * output_price) / 1000000
+
+ get_event_manager(self.args.event_file).write_result(
+ EventContentCreator.create_result(content=EventContentCreator.ResultTokenStatContent(
+ model_name=model_name,
+ elapsed_time=0.0,
+ first_token_time=last_meta.first_token_time,
+ input_tokens=last_meta.input_tokens_count,
+ output_tokens=last_meta.generated_tokens_count,
+ ).to_dict()),metadata=metadata.to_dict())
+
  elif isinstance(agent_event, CompletionEvent):
  # 在这里完成实际合并
  try:
  self.apply_changes()
  except Exception as e:
- logger.exception(f"Error merging shadow changes to project: {e}")
+ logger.exception(
+ f"Error merging shadow changes to project: {e}")

  metadata.path = "/agent/edit/completion"
  content = EventContentCreator.create_completion(
  success_code="AGENT_COMPLETE",
  success_message="Agent attempted task completion.",
  result={
- **agent_event.completion.model_dump()
+ "response": agent_event.completion.result
  }
  )
- event_manager.write_completion(content=content.to_dict(), metadata=metadata.to_dict())
+ event_manager.write_completion(
+ content=content.to_dict(), metadata=metadata.to_dict())
  elif isinstance(agent_event, ErrorEvent):
  metadata.path = "/agent/edit/error"
@@ -1152,7 +1209,8 @@ class AgenticEdit:
  error_message=agent_event.message,
  details={"agent_event_type": "ErrorEvent"}
  )
- event_manager.write_error(content=content.to_dict(), metadata=metadata.to_dict())
+ event_manager.write_error(
+ content=content.to_dict(), metadata=metadata.to_dict())
  else:
  metadata.path = "/agent/edit/error"
  logger.warning(
@@ -1163,7 +1221,8 @@ class AgenticEdit:
  details={"agent_event_type": type(
  agent_event).__name__}
  )
- event_manager.write_error(content=content.to_dict(), metadata=metadata.to_dict())
+ event_manager.write_error(
+ content=content.to_dict(), metadata=metadata.to_dict())

  except Exception as e:
  logger.exception(
@@ -1174,15 +1233,16 @@ class AgenticEdit:
  error_message=f"An unexpected error occurred: {str(e)}",
  details={"exception_type": type(e).__name__}
  )
- event_manager.write_error(content=error_content.to_dict(), metadata=metadata.to_dict())
+ event_manager.write_error(
+ content=error_content.to_dict(), metadata=metadata.to_dict())
  # Re-raise the exception if needed, or handle appropriately
  raise e

  def apply_changes(self):
  """
  Apply all tracked file changes to the original project directory.
- """
- for (file_path,change) in self.get_all_file_changes().items():
+ """
+ for (file_path, change) in self.get_all_file_changes().items():
  with open(file_path, 'w', encoding='utf-8') as f:
  f.write(change.content)

@@ -1194,33 +1254,40 @@ class AgenticEdit:
  self.args.source_dir,
  f"{self.args.query}\nauto_coder_{file_name}",
  )
-
- action_yml_file_manager = ActionYmlFileManager(self.args.source_dir)
+
+ action_yml_file_manager = ActionYmlFileManager(
+ self.args.source_dir)
  action_file_name = os.path.basename(self.args.file)
  add_updated_urls = []
  commit_result.changed_files
  for file in commit_result.changed_files:
- add_updated_urls.append(os.path.join(self.args.source_dir, file))
+ add_updated_urls.append(
+ os.path.join(self.args.source_dir, file))

  self.args.add_updated_urls = add_updated_urls
- update_yaml_success = action_yml_file_manager.update_yaml_field(action_file_name, "add_updated_urls", add_updated_urls)
- if not update_yaml_success:
- self.printer.print_in_terminal("yaml_save_error", style="red", yaml_file=action_file_name)
+ update_yaml_success = action_yml_file_manager.update_yaml_field(
+ action_file_name, "add_updated_urls", add_updated_urls)
+ if not update_yaml_success:
+ self.printer.print_in_terminal(
+ "yaml_save_error", style="red", yaml_file=action_file_name)

  if self.args.enable_active_context:
- active_context_manager = ActiveContextManager(self.llm, self.args.source_dir)
- task_id = active_context_manager.process_changes(self.args)
- self.printer.print_in_terminal("active_context_background_task",
- style="blue",
- task_id=task_id)
+ active_context_manager = ActiveContextManager(
+ self.llm, self.args.source_dir)
+ task_id = active_context_manager.process_changes(
+ self.args)
+ self.printer.print_in_terminal("active_context_background_task",
+ style="blue",
+ task_id=task_id)
  git_utils.print_commit_info(commit_result=commit_result)
  except Exception as e:
  self.printer.print_str_in_terminal(
- self.git_require_msg(source_dir=self.args.source_dir, error=str(e)),
+ self.git_require_msg(
+ source_dir=self.args.source_dir, error=str(e)),
  style="red"
- )
+ )
  else:
- self.printer.print_in_terminal("no_changes_made")
+ self.printer.print_in_terminal("no_changes_made")

  def run_in_terminal(self, request: AgenticEditRequest):
  """
@@ -1258,6 +1325,9 @@ class AgenticEdit:
  if event.tool_name == "AttemptCompletionTool":
  continue # Do not display AttemptCompletionTool result

+ if event.tool_name == "PlanModeRespondTool":
+ continue
+
  result = event.result
  title = f"✅ Tool Result: {event.tool_name}" if result.success else f"❌ Tool Result: {event.tool_name}"
  border_style = "green" if result.success else "red"
@@ -1274,7 +1344,7 @@ class AgenticEdit:
  panel_content = [base_content]
  syntax_content = None

- if result.content is not None:
+ if result.content is not None:
  content_str = ""
  try:
  if isinstance(result.content, (dict, list)):
@@ -1316,7 +1386,8 @@ class AgenticEdit:
  else:
  content_str = str(result.content)
  # Append simple string content directly
- panel_content.append(_format_content(content_str))
+ panel_content.append(
+ _format_content(content_str))
  except Exception as e:
  logger.warning(
  f"Error formatting tool result content: {e}")
@@ -1329,13 +1400,17 @@ class AgenticEdit:
  # Print syntax highlighted content separately if it exists
  if syntax_content:
  console.print(syntax_content)
+ elif isinstance(event, PlanModeRespondEvent):
+ console.print(Panel(Markdown(event.completion.response),
+ title="🏁 Task Completion", border_style="green", title_align="left"))

  elif isinstance(event, CompletionEvent):
  # 在这里完成实际合并
  try:
  self.apply_changes()
  except Exception as e:
- logger.exception(f"Error merging shadow changes to project: {e}")
+ logger.exception(
+ f"Error merging shadow changes to project: {e}")

  console.print(Panel(Markdown(event.completion.result),
  title="🏁 Task Completion", border_style="green", title_align="left"))

autocoder/common/v2/agent/agentic_edit_conversation.py CHANGED
@@ -1,6 +1,7 @@
  # src/autocoder/common/v2/agent/agentic_edit_conversation.py
  import os
  import json
+ import uuid
  from typing import List, Dict, Any, Optional
  from autocoder.common import AutoCoderArgs

@@ -15,16 +16,32 @@ class AgenticConversation:
  and retrieving the history.
  """

- def __init__(self, args: AutoCoderArgs, initial_history: Optional[List[MessageType]] = None):
+ def __init__(self, args: AutoCoderArgs, initial_history: Optional[List[MessageType]] = None, conversation_name: Optional[str] = None):
  """
  Initializes the conversation history.

  Args:
  initial_history: An optional list of messages to start with.
+ conversation_name: Optional conversation identifier. If provided, history is saved/loaded from a file named after it.
  """
  self.project_path = args.source_dir
  self._history: List[MessageType] = initial_history if initial_history is not None else []
- self.memory_file_path = os.path.join(self.project_path, ".auto-coder", "memory", "agentic_edit_memory.json")
+
+ # Determine the memory directory
+ memory_dir = os.path.join(self.project_path, ".auto-coder", "memory", "agentic_edit_memory")
+ os.makedirs(memory_dir, exist_ok=True)
+
+ # Determine conversation file path
+ if conversation_name:
+ filename = f"{conversation_name}.json"
+ else:
+ conversation_name = str(uuid.uuid4())
+ filename = f"{conversation_name}.json"
+
+ self.conversation_name = conversation_name
+ self.memory_file_path = os.path.join(memory_dir, filename)
+
+ # Load existing history if file exists
  self._load_memory()

  def add_message(self, role: str, content: Any, **kwargs):
@@ -67,18 +84,67 @@ class AgenticConversation:

  def get_history(self) -> List[MessageType]:
  """
- Returns the current conversation history.
-
+ Returns the latest 20 pairs of (user, assistant) conversation history.
+ Merges adjacent same-role messages into one, concatenated by newline.
+ Ensures that each user message is paired with the subsequent assistant response,
+ skips other roles, and that the last message is always assistant (drops trailing user if unpaired).
+
  Returns:
- A list of message dictionaries.
+ A list of message dictionaries, ordered chronologically.
  """
- # Return a deep copy might be safer if messages contain mutable objects,
- # but a shallow copy is usually sufficient for typical message structures.
- return self._history.copy()
+ paired_history = []
+ pair_count = 0
+ pending_assistant = None
+ pending_user = None
+
+ # Traverse history in reverse to collect latest pairs with merging
+ for msg in reversed(self._history):
+ role = msg.get("role")
+ if role == "assistant":
+ if pending_assistant is None:
+ pending_assistant = dict(msg)
+ else:
+ # Merge with previous assistant
+ prev_content = pending_assistant.get("content", "")
+ curr_content = msg.get("content", "")
+ merged_content = (curr_content.strip() + "\n" + prev_content.strip()).strip()
+ pending_assistant["content"] = merged_content
+ elif role == "user":
+ if pending_user is None:
+ pending_user = dict(msg)
+ else:
+ # Merge with previous user
+ prev_content = pending_user.get("content", "")
+ curr_content = msg.get("content", "")
+ merged_content = (curr_content.strip() + "\n" + prev_content.strip()).strip()
+ pending_user["content"] = merged_content
+
+ if pending_assistant is not None:
+ # Have a full pair, insert in order
+ paired_history.insert(0, pending_user)
+ paired_history.insert(1, pending_assistant)
+ pair_count += 1
+ pending_assistant = None
+ pending_user = None
+ if pair_count >= 20:
+ break
+ else:
+ # User without assistant yet, continue accumulating
+ continue
+ else:
+ # Ignore other roles
+ continue
+
+ # Ensure last message is assistant, drop trailing user if unpaired
+ if paired_history and paired_history[-1].get("role") == "user":
+ paired_history.pop()
+
+ return paired_history

  def clear_history(self):
  """Clears the conversation history."""
  self._history = []
+ self._save_memory()

  def __len__(self) -> int:
  """Returns the number of messages in the history."""

autocoder/common/v2/agent/agentic_edit_tools/replace_in_file_tool_resolver.py CHANGED
@@ -6,6 +6,7 @@ from autocoder.common import AutoCoderArgs
  from autocoder.common.v2.agent.agentic_edit_tools.base_tool_resolver import BaseToolResolver
  from autocoder.common.v2.agent.agentic_edit_types import ReplaceInFileTool, ToolResult # Import ToolResult from types
  from loguru import logger
+ from autocoder.common.auto_coder_lang import get_message_with_format
  if typing.TYPE_CHECKING:
  from autocoder.common.v2.agent.agentic_edit import AgenticEdit

@@ -66,7 +67,7 @@ class ReplaceInFileToolResolver(BaseToolResolver):

  # Security check
  if not abs_file_path.startswith(abs_project_dir):
- return ToolResult(success=False, message=f"Error: Access denied. Attempted to modify file outside the project directory: {file_path}")
+ return ToolResult(success=False, message=get_message_with_format("replace_in_file.access_denied", file_path=file_path))

  # Determine target path: shadow file if shadow_manager exists
  target_path = abs_file_path
@@ -90,14 +91,14 @@ class ReplaceInFileToolResolver(BaseToolResolver):
  f.write(original_content)
  logger.info(f"[Shadow] Initialized shadow file from original: {target_path}")
  else:
- return ToolResult(success=False, message=f"Error: File not found at path: {file_path}")
+ return ToolResult(success=False, message=get_message_with_format("replace_in_file.file_not_found", file_path=file_path))
  except Exception as e:
  logger.error(f"Error reading file for replace '{file_path}': {str(e)}")
- return ToolResult(success=False, message=f"An error occurred while reading the file for replacement: {str(e)}")
+ return ToolResult(success=False, message=get_message_with_format("replace_in_file.read_error", error=str(e)))

  parsed_blocks = self.parse_diff(diff_content)
  if not parsed_blocks:
- return ToolResult(success=False, message="Error: No valid SEARCH/REPLACE blocks found in the provided diff.")
+ return ToolResult(success=False, message=get_message_with_format("replace_in_file.no_valid_blocks"))

  current_content = original_content
  applied_count = 0
@@ -121,7 +122,7 @@ class ReplaceInFileToolResolver(BaseToolResolver):
  # continue applying remaining blocks

  if applied_count == 0 and errors:
- return ToolResult(success=False, message=f"Failed to apply any changes. Errors:\n" + "\n".join(errors))
+ return ToolResult(success=False, message=get_message_with_format("replace_in_file.apply_failed", errors="\n".join(errors)))

  try:
  os.makedirs(os.path.dirname(target_path), exist_ok=True)
@@ -129,9 +130,17 @@ class ReplaceInFileToolResolver(BaseToolResolver):
  f.write(current_content)
  logger.info(f"Successfully applied {applied_count}/{len(parsed_blocks)} changes to file: {file_path}")

- message = f"Successfully applied {applied_count}/{len(parsed_blocks)} changes to file: {file_path}."
  if errors:
- message += "\nWarnings:\n" + "\n".join(errors)
+ message = get_message_with_format("replace_in_file.apply_success_with_warnings",
+ applied=applied_count,
+ total=len(parsed_blocks),
+ file_path=file_path,
+ errors="\n".join(errors))
+ else:
+ message = get_message_with_format("replace_in_file.apply_success",
+ applied=applied_count,
+ total=len(parsed_blocks),
+ file_path=file_path)

  # 变更跟踪,回调AgenticEdit
  if self.agent:
@@ -141,4 +150,4 @@ class ReplaceInFileToolResolver(BaseToolResolver):
  return ToolResult(success=True, message=message, content=current_content)
  except Exception as e:
  logger.error(f"Error writing replaced content to file '{file_path}': {str(e)}")
- return ToolResult(success=False, message=f"An error occurred while writing the modified file: {str(e)}")
+ return ToolResult(success=False, message=get_message_with_format("replace_in_file.write_error", error=str(e)))
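
Note: the resolver above reports results as "applied/total" plus collected warnings. The sketch below applies SEARCH/REPLACE blocks in that spirit; the exact block grammar accepted by the package's parse_diff is an assumption here (conflict-style markers are shown).

```python
# Illustrative sketch of applying SEARCH/REPLACE blocks and counting applied/total,
# with per-block errors collected the way the resolver reports them.
import re
from typing import List, Tuple

BLOCK_RE = re.compile(
    r"<<<<<<< SEARCH\n(.*?)\n=======\n(.*?)\n>>>>>>> REPLACE", re.DOTALL
)


def parse_blocks(diff_text: str) -> List[Tuple[str, str]]:
    return BLOCK_RE.findall(diff_text)


def apply_blocks(content: str, diff_text: str) -> Tuple[str, int, int, List[str]]:
    blocks = parse_blocks(diff_text)
    applied, errors = 0, []
    for search, replace in blocks:
        if search in content:
            content = content.replace(search, replace, 1)  # first occurrence only
            applied += 1
        else:
            errors.append(f"SEARCH block not found: {search!r}")
    return content, applied, len(blocks), errors


if __name__ == "__main__":
    original = "def add(a, b):\n    return a + b\n"
    diff_text = (
        "<<<<<<< SEARCH\n    return a + b\n=======\n    return a + b  # add\n>>>>>>> REPLACE"
    )
    new_content, applied, total, errors = apply_blocks(original, diff_text)
    print(f"Successfully applied {applied}/{total} changes.")  # mirrors the new i18n message
```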

autocoder/common/v2/agent/agentic_edit_types.py CHANGED
@@ -80,6 +80,11 @@ class TokenUsageEvent(BaseModel):
  """Represents the result of executing a tool."""
  usage: Any

+ class PlanModeRespondEvent(BaseModel):
+ """Represents the LLM attempting to complete the task."""
+ completion: SkipValidation[PlanModeRespondTool] # Skip validation
+ completion_xml: str
+
  class CompletionEvent(BaseModel):
  """Represents the LLM attempting to complete the task."""
  completion: SkipValidation[AttemptCompletionTool] # Skip validation
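
Note: the new PlanModeRespondEvent mirrors CompletionEvent in that the already-parsed tool object is stored under SkipValidation, so constructing the event does not re-validate it. A minimal sketch follows (pydantic v2 assumed; PlanTool is a stand-in for the package's PlanModeRespondTool).

```python
# Minimal sketch of the event shape, not the package's definitions.
from pydantic import BaseModel, SkipValidation


class PlanTool(BaseModel):
    response: str
    options: list[str] = []


class PlanModeRespondEventSketch(BaseModel):
    completion: SkipValidation[PlanTool]   # accepted as-is, no re-validation
    completion_xml: str


event = PlanModeRespondEventSketch(
    completion=PlanTool(response="Here is the plan...", options=["A", "B"]),
    completion_xml="<plan_mode_respond>...</plan_mode_respond>",
)
print(event.completion.response)
```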

autocoder/common/v2/agent/agentic_tool_display.py CHANGED
@@ -93,7 +93,7 @@ TOOL_DISPLAY_MESSAGES: Dict[Type[BaseTool], Dict[str, str]] = {
  "[dim]工具:[/dim] [blue]{{ tool_name }}[/]\n"
  "[dim]参数:[/dim] {{ arguments_snippet }}{{ ellipsis }}"
  )
- }
+ }
  # AttemptCompletionTool is handled separately in the display loop
  }

@@ -165,7 +165,7 @@ def get_tool_display_message(tool: BaseTool) -> str:
  "options_text": options_text_zh if lang == 'zh' else options_text_en
  }
  elif isinstance(tool, UseMcpTool):
- args_str = json.dumps(tool.arguments, ensure_ascii=False)
+ args_str = tool.query
  snippet = args_str[:100]
  context = {
  "server_name": tool.server_name,

autocoder/version.py CHANGED
@@ -1 +1 @@
- __version__ = "0.1.337"
+ __version__ = "0.1.340"