auto-coder 0.1.263__py3-none-any.whl → 0.1.265__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release: this version of auto-coder might be problematic.

Files changed (58)
  1. {auto_coder-0.1.263.dist-info → auto_coder-0.1.265.dist-info}/METADATA +1 -1
  2. {auto_coder-0.1.263.dist-info → auto_coder-0.1.265.dist-info}/RECORD +58 -55
  3. autocoder/agent/planner.py +4 -4
  4. autocoder/auto_coder.py +26 -21
  5. autocoder/auto_coder_server.py +7 -7
  6. autocoder/chat_auto_coder.py +203 -98
  7. autocoder/commands/auto_command.py +81 -4
  8. autocoder/commands/tools.py +48 -50
  9. autocoder/common/__init__.py +6 -1
  10. autocoder/common/auto_coder_lang.py +41 -3
  11. autocoder/common/code_auto_generate.py +3 -3
  12. autocoder/common/code_auto_generate_diff.py +12 -15
  13. autocoder/common/code_auto_generate_editblock.py +3 -3
  14. autocoder/common/code_auto_generate_strict_diff.py +3 -3
  15. autocoder/common/code_auto_merge.py +23 -3
  16. autocoder/common/code_auto_merge_diff.py +29 -4
  17. autocoder/common/code_auto_merge_editblock.py +25 -5
  18. autocoder/common/code_auto_merge_strict_diff.py +26 -6
  19. autocoder/common/code_modification_ranker.py +65 -3
  20. autocoder/common/command_completer.py +3 -0
  21. autocoder/common/command_generator.py +24 -8
  22. autocoder/common/command_templates.py +2 -2
  23. autocoder/common/conf_import_export.py +105 -0
  24. autocoder/common/conf_validator.py +7 -1
  25. autocoder/common/context_pruner.py +305 -0
  26. autocoder/common/files.py +41 -2
  27. autocoder/common/image_to_page.py +11 -11
  28. autocoder/common/index_import_export.py +38 -18
  29. autocoder/common/mcp_hub.py +3 -3
  30. autocoder/common/mcp_server.py +2 -2
  31. autocoder/common/shells.py +254 -13
  32. autocoder/common/stats_panel.py +126 -0
  33. autocoder/dispacher/actions/action.py +6 -18
  34. autocoder/dispacher/actions/copilot.py +2 -2
  35. autocoder/dispacher/actions/plugins/action_regex_project.py +1 -3
  36. autocoder/dispacher/actions/plugins/action_translate.py +1 -1
  37. autocoder/index/entry.py +8 -2
  38. autocoder/index/filter/normal_filter.py +13 -2
  39. autocoder/index/filter/quick_filter.py +127 -13
  40. autocoder/index/index.py +8 -7
  41. autocoder/models.py +2 -2
  42. autocoder/pyproject/__init__.py +5 -5
  43. autocoder/rag/cache/byzer_storage_cache.py +4 -4
  44. autocoder/rag/cache/file_monitor_cache.py +2 -2
  45. autocoder/rag/cache/simple_cache.py +4 -4
  46. autocoder/rag/long_context_rag.py +2 -2
  47. autocoder/regexproject/__init__.py +3 -2
  48. autocoder/suffixproject/__init__.py +3 -2
  49. autocoder/tsproject/__init__.py +3 -2
  50. autocoder/utils/conversation_store.py +1 -1
  51. autocoder/utils/operate_config_api.py +3 -3
  52. autocoder/utils/project_structure.py +258 -3
  53. autocoder/utils/thread_utils.py +6 -1
  54. autocoder/version.py +1 -1
  55. {auto_coder-0.1.263.dist-info → auto_coder-0.1.265.dist-info}/LICENSE +0 -0
  56. {auto_coder-0.1.263.dist-info → auto_coder-0.1.265.dist-info}/WHEEL +0 -0
  57. {auto_coder-0.1.263.dist-info → auto_coder-0.1.265.dist-info}/entry_points.txt +0 -0
  58. {auto_coder-0.1.263.dist-info → auto_coder-0.1.265.dist-info}/top_level.txt +0 -0
autocoder/common/context_pruner.py ADDED
@@ -0,0 +1,305 @@
+ from typing import List, Dict, Any, Union
+ from pathlib import Path
+ import json
+ from loguru import logger
+ from autocoder.rag.token_counter import count_tokens
+ from autocoder.common import AutoCoderArgs, SourceCode
+ from byzerllm.utils.client.code_utils import extract_code
+ from autocoder.index.types import VerifyFileRelevance
+ import byzerllm
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+
+ class PruneContext:
+     def __init__(self, max_tokens: int, args: AutoCoderArgs, llm: Union[byzerllm.ByzerLLM, byzerllm.SimpleByzerLLM]):
+         self.max_tokens = max_tokens
+         self.args = args
+         self.llm = llm
+
+     def _delete_overflow_files(self, file_paths: List[str]) -> List[SourceCode]:
+         """Delete files that exceed the token limit outright."""
+         total_tokens = 0
+         selected_files = []
+         token_count = 0
+         for file_path in file_paths:
+             try:
+                 with open(file_path, "r", encoding="utf-8") as f:
+                     content = f.read()
+                     token_count = count_tokens(content)
+                     if total_tokens + token_count <= self.max_tokens:
+                         total_tokens += token_count
+                         print(f"{file_path} {token_count} {content}")
+                         selected_files.append(SourceCode(module_name=file_path, source_code=content, tokens=token_count))
+                     else:
+                         break
+             except Exception as e:
+                 logger.error(f"Failed to read file {file_path}: {e}")
+                 selected_files.append(SourceCode(module_name=file_path, source_code=content, tokens=token_count))
+
+         return selected_files
+
+     def _extract_code_snippets(self, file_paths: List[str], conversations: List[Dict[str, str]]) -> List[SourceCode]:
+         """Strategy: extract the key code snippets."""
+         token_count = 0
+         selected_files = []
+         full_file_tokens = int(self.max_tokens * 0.8)
+
+         @byzerllm.prompt()
+         def extract_code_snippets(conversations: List[Dict[str, str]], content: str) -> str:
+             """
+             Extract the relevant code snippets from the given code file based on the conversation history.
+
+             Worked examples:
+             <examples>
+             1. Code file:
+             <code_file>
+             1 def add(a, b):
+             2     return a + b
+             3 def sub(a, b):
+             4     return a - b
+             </code_file>
+             <conversation_history>
+             <user>: How do I implement addition?
+             </conversation_history>
+
+             Output:
+             ```json
+             [
+                 {"start_line": 1, "end_line": 2}
+             ]
+             ```
+
+             2. Code file:
+             1 class User:
+             2     def __init__(self, name):
+             3         self.name = name
+             4     def greet(self):
+             5         return f"Hello, {self.name}"
+             </code_file>
+             <conversation_history>
+             <user>: How do I create a User object?
+             </conversation_history>
+
+             Output:
+             ```json
+             [
+                 {"start_line": 1, "end_line": 3}
+             ]
+             ```
+
+             3. Code file:
+             <code_file>
+             1 def foo():
+             2     pass
+             </code_file>
+             <conversation_history>
+             <user>: How do I implement subtraction?
+             </conversation_history>
+
+             Output:
+             ```json
+             []
+             ```
+             </examples>
+
+             Input:
+             1. Code file content:
+             <code_file>
+             {{ content }}
+             </code_file>
+
+             2. Conversation history:
+             <conversation_history>
+             {% for msg in conversations %}
+             <{{ msg.role }}>: {{ msg.content }}
+             {% endfor %}
+             </conversation_history>
+
+             Task:
+             1. Analyze the last user question and its context.
+             2. Find one or more important code segments in the code file that are relevant to the question.
+             3. For each relevant segment, determine its start line number (start_line) and end line number (end_line).
+             4. Return no more than 4 segments.
+
+             Output requirements:
+             1. Return a JSON array in which every element contains "start_line" and "end_line".
+             2. start_line and end_line must be integers denoting line numbers in the code file.
+             3. Line numbers start at 1.
+             4. If there is no relevant segment, return an empty array [].
+
+             Output format:
+             A strict JSON array, with no extra text or explanation.
+
+             ```json
+             [
+                 {"start_line": start line of the first segment, "end_line": end line of the first segment},
+                 {"start_line": start line of the second segment, "end_line": end line of the second segment}
+             ]
+             ```
+             """
+
+         for file_path in file_paths:
+             try:
+                 with open(file_path, "r", encoding="utf-8") as f:
+                     content = f.read()
+
+                 # Full files take priority while they fit
+                 tokens = count_tokens(content)
+                 if token_count + tokens <= full_file_tokens:
+                     selected_files.append(SourceCode(module_name=file_path, source_code=content, tokens=tokens))
+                     token_count += tokens
+                     continue
+
+                 # Otherwise extract the key snippets
+                 extracted = extract_code_snippets.with_llm(self.llm).run(
+                     conversations=conversations,
+                     content=content
+                 )
+
+                 if extracted:
+                     json_str = extract_code(extracted)[0][1]
+                     snippets = json.loads(json_str)
+                     new_content = self._build_snippet_content(file_path, content, snippets)
+
+                     snippet_tokens = count_tokens(new_content)
+                     if token_count + snippet_tokens <= self.max_tokens:
+                         selected_files.append(SourceCode(module_name=file_path, source_code=new_content, tokens=snippet_tokens))
+                         token_count += snippet_tokens
+                     else:
+                         break
+             except Exception as e:
+                 logger.error(f"Failed to process {file_path}: {e}")
+                 continue
+
+         return selected_files
+
+     def _build_snippet_content(self, file_path: str, full_content: str, snippets: List[dict]) -> str:
+         """Build the file content that carries only the selected code snippets."""
+         lines = full_content.split("\n")
+         header = f"Snippets:\n"
+
+         content = []
+         for snippet in snippets:
+             start = max(0, snippet["start_line"] - 1)
+             end = min(len(lines), snippet["end_line"])
+             content.append(f"# Lines {start+1}-{end} ({snippet.get('reason','')})")
+             content.extend(lines[start:end])
+
+         return header + "\n".join(content)
+
+     def handle_overflow(
+         self,
+         file_paths: List[str],
+         conversations: List[Dict[str, str]],
+         strategy: str = "score"
+     ) -> List[SourceCode]:
+         """
+         Handle files that overflow the token limit.
+         :param file_paths: list of file paths to process
+         :param conversations: conversation context (used by the extract strategy)
+         :param strategy: handling strategy (delete/extract/score)
+         """
+         total_tokens, sources = self._count_tokens(file_paths)
+         if total_tokens <= self.max_tokens:
+             return sources
+
+         if strategy == "score":
+             return self._score_and_filter_files(file_paths, conversations)
+         if strategy == "delete":
+             return self._delete_overflow_files(file_paths)
+         elif strategy == "extract":
+             return self._extract_code_snippets(file_paths, conversations)
+         else:
+             raise ValueError(f"Invalid strategy: {strategy}. Valid options: delete/extract/score")
+
+     def _count_tokens(self, file_paths: List[str]) -> int:
+         """Count the total number of tokens across the files."""
+         total_tokens = 0
+         sources = []
+         for file_path in file_paths:
+             try:
+                 with open(file_path, "r", encoding="utf-8") as f:
+                     content = f.read()
+                     sources.append(SourceCode(module_name=file_path, source_code=content, tokens=count_tokens(content)))
+                     total_tokens += count_tokens(content)
+             except Exception as e:
+                 logger.error(f"Failed to read file {file_path}: {e}")
+                 total_tokens += 0
+         return total_tokens, sources
+
+     def _score_and_filter_files(self, file_paths: List[str], conversations: List[Dict[str, str]]) -> List[SourceCode]:
+         """Score files by relevance and keep appending them until the token count would exceed max_tokens."""
+         selected_files = []
+         total_tokens = 0
+         scored_files = []
+
+         @byzerllm.prompt()
+         def verify_file_relevance(file_content: str, conversations: List[Dict[str, str]]) -> str:
+             """
+             Verify whether the file content below is relevant to the user conversation:
+
+             File content:
+             {{ file_content }}
+
+             Conversation history:
+             <conversation_history>
+             {% for msg in conversations %}
+             <{{ msg.role }}>: {{ msg.content }}
+             {% endfor %}
+             </conversation_history>
+
+             Relevant means the file is needed as context for, or must be modified to solve, the user's problem.
+             Give a likelihood score from 0 to 10 and, with the user's question in mind, a reason of at most 50 characters. Format:
+
+             ```json
+             {
+                 "relevant_score": 0-10,
+                 "reason": "why this is relevant (no more than 10 Chinese characters)..."
+             }
+             ```
+             """
+
+         def _score_file(file_path: str) -> dict:
+             try:
+                 with open(file_path, "r", encoding="utf-8") as f:
+                     content = f.read()
+                     tokens = count_tokens(content)
+                     result = verify_file_relevance.with_llm(self.llm).with_return_type(VerifyFileRelevance).run(
+                         file_content=content,
+                         conversations=conversations
+                     )
+                     return {
+                         "file_path": file_path,
+                         "score": result.relevant_score,
+                         "tokens": tokens,
+                         "content": content
+                     }
+             except Exception as e:
+                 logger.error(f"Failed to score file {file_path}: {e}")
+                 return None
+
+         # Score the files in parallel with a thread pool
+         with ThreadPoolExecutor() as executor:
+             futures = [executor.submit(_score_file, file_path) for file_path in file_paths]
+             for future in as_completed(futures):
+                 result = future.result()
+                 print(f"score file {result['file_path']} {result['score']}")
+                 if result:
+                     scored_files.append(result)
+
+         # Step 2: sort by score, highest first
+         scored_files.sort(key=lambda x: x["score"], reverse=True)
+
+         # Step 3: append from the highest score down; stop once the token count would exceed max_tokens
+         for file_info in scored_files:
+             if total_tokens + file_info["tokens"] <= self.max_tokens:
+                 selected_files.append(SourceCode(
+                     module_name=file_info["file_path"],
+                     source_code=file_info["content"],
+                     tokens=file_info["tokens"]
+                 ))
+                 total_tokens += file_info["tokens"]
+             else:
+                 break
+
+         return selected_files
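The new `PruneContext` class gives auto-coder three interchangeable strategies for fitting candidate files into a model's token budget. A minimal usage sketch follows; it is illustrative rather than part of the diff, and the budget, file paths, and conversation are hypothetical (it also assumes an `AutoCoderArgs` instance and a configured byzerllm client are already in hand):

```python
# Hedged sketch; only the PruneContext API itself comes from the diff above.
from autocoder.common.context_pruner import PruneContext

def build_context(args, llm):
    pruner = PruneContext(max_tokens=8000, args=args, llm=llm)  # budget is illustrative
    files = ["src/app.py", "src/utils.py"]                      # hypothetical paths
    conversations = [{"role": "user", "content": "How do I implement addition?"}]

    # "delete": keep files in the given order until the budget is spent.
    # "extract": keep whole files while under 80% of the budget, then ask the
    #            LLM for line-range snippets of the remainder.
    # "score": score every file's relevance in parallel and keep the best fits.
    sources = pruner.handle_overflow(files, conversations, strategy="score")
    return "\n\n".join(s.source_code for s in sources)
```

Note that `handle_overflow` short-circuits and returns the files untouched whenever they already fit within `max_tokens`, so the chosen strategy only matters on overflow.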
autocoder/common/files.py CHANGED
@@ -1,5 +1,5 @@
  from autocoder.common.auto_coder_lang import get_message_with_format
- from typing import List, Dict, Union
+ from typing import List, Dict, Union, Generator, Tuple
 
  def read_file(file_path):
      """Read a file with automatic encoding detection.
@@ -30,6 +30,45 @@ def read_file(file_path):
                       file_path=file_path,
                       encodings=", ".join(encodings)))
 
+ def read_lines(file_path: str):
+     encodings = ['utf-8', 'gbk', 'utf-16', 'latin-1']
+     for encoding in encodings:
+         try:
+             with open(file_path, 'r', encoding=encoding) as f:
+                 return f.readlines()
+         except UnicodeDecodeError:
+             continue
+     raise ValueError(get_message_with_format("file_decode_error",
+                      file_path=file_path,
+                      encodings=", ".join(encodings)))
+
+
+
+ def read_file_with_line_numbers(file_path: str, line_number_start: int = 0) -> Generator[Tuple[int, str], None, None]:
+     """Read a file and return its content with line numbers.
+
+     Args:
+         file_path (str): Path to the file to read
+
+     Returns:
+         List[str]: A list of strings where each string is in the format "line_number:line_content"
+
+     Raises:
+         ValueError: If the file cannot be decoded with any of the tried encodings
+     """
+     encodings = ['utf-8', 'gbk', 'utf-16', 'latin-1']
+
+     for encoding in encodings:
+         try:
+             with open(file_path, 'r', encoding=encoding) as file:
+                 for line_number, line in enumerate(file, start=line_number_start):
+                     yield (line_number, line)
+         except UnicodeDecodeError:
+             continue
+
+     raise ValueError(get_message_with_format("file_decode_error",
+                      file_path=file_path,
+                      encodings=", ".join(encodings)))
 
 
  def save_file(file_path: str, content: Union[str, List[str]]) -> None:
@@ -55,4 +94,4 @@ def save_file(file_path: str, content: Union[str, List[str]]) -> None:
      except IOError as e:
          raise IOError(get_message_with_format("file_write_error",
                        file_path=file_path,
-                       error=str(e)))
+                       error=str(e)))
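For reference, a consumption sketch for the new `read_file_with_line_numbers` generator (illustrative; the file path is hypothetical). Two things worth knowing: it yields `(line_number, line)` tuples rather than the `"line_number:line_content"` strings its docstring mentions, and since it is a generator, the decode-failure `ValueError` surfaces during iteration, not at call time.

```python
# Illustrative only; README.md stands in for any text file.
from autocoder.common.files import read_file_with_line_numbers

# line_number_start=1 aligns the numbering with editor line numbers.
for number, line in read_file_with_line_numbers("README.md", line_number_start=1):
    print(f"{number}: {line.rstrip()}")
```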
autocoder/common/image_to_page.py CHANGED
@@ -122,7 +122,7 @@ class ImageToPageDirectly:
 
          counter = 1
          target_html_path = os.path.join(html_dir, f"{html_file_name}-{counter}.html")
-         with open(target_html_path, "w") as f:
+         with open(target_html_path, "w", encoding="utf-8") as f:
              f.write(html)
 
          while counter < max_iter:
@@ -137,11 +137,11 @@ class ImageToPageDirectly:
 
              target_html_path = os.path.join(html_dir, f"{html_file_name}-{counter}.html")
              logger.info(f"generate html: {target_html_path}")
-             with open(target_html_path, "w") as f:
+             with open(target_html_path, "w", encoding="utf-8") as f:
                  f.write(html)
 
          logger.info(f"finally generate html: {html_path}")
-         with open(html_path, "w") as f:
+         with open(html_path, "w", encoding="utf-8") as f:
              f.write(html)
 
 
@@ -248,7 +248,7 @@ class ImageToPage:
          file_path = block.path
          os.makedirs(os.path.dirname(file_path), exist_ok=True)
 
-         with open(file_path, "w") as f:
+         with open(file_path, "w", encoding="utf-8") as f:
              logger.info(f"Upsert path: {file_path}")
              f.write(block.content)
              file_modified_num += 1
@@ -268,7 +268,7 @@ class ImageToPage:
          ## generate html by image description
          content_contains_html_prompt = self.generate_html.prompt(desc, html_path)
 
-         with open(self.args.target_file, "w") as f:
+         with open(self.args.target_file, "w", encoding="utf-8") as f:
              f.write(content_contains_html_prompt)
 
          t = self.llm.chat_oai(conversations=[{
@@ -278,7 +278,7 @@ class ImageToPage:
 
          content_contains_html = t[0].output
 
-         with open(self.args.target_file, "w") as f:
+         with open(self.args.target_file, "w", encoding="utf-8") as f:
              f.write(content_contains_html)
 
 
@@ -296,7 +296,7 @@ class ImageToPage:
 
          for i in range(max_iter):
              logger.info(f"iterate {i}")
-             with open(html_path, "r") as f:
+             with open(html_path, "r", encoding="utf-8") as f:
                  prev_html = f.read()
 
              gen_screenshots(url=html_path, image_dir=new_image_dir)
@@ -309,7 +309,7 @@ class ImageToPage:
              ## get new description prompt by comparing old and new image
              new_desc_prompt = self.get_optimize(self.score(origin_image, new_image))
 
-             with open(self.args.target_file, "w") as f:
+             with open(self.args.target_file, "w", encoding="utf-8") as f:
                  f.write(new_desc_prompt)
 
              t = self.llm.chat_oai(conversations=[{
@@ -319,7 +319,7 @@ class ImageToPage:
 
              new_desc = t[0].output
 
-             with open(self.args.target_file, "w") as f:
+             with open(self.args.target_file, "w", encoding="utf-8") as f:
                  f.write(new_desc)
 
              logger.info(f"score old/new image: {new_desc}")
@@ -327,7 +327,7 @@ class ImageToPage:
              ## generate new html by new description
              optimze_html_prompt = self.optimize_html.prompt(desc=new_desc, html=prev_html, html_path=html_path)
 
-             with open(self.args.target_file, "w") as f:
+             with open(self.args.target_file, "w", encoding="utf-8") as f:
                  f.write(optimze_html_prompt)
 
              t = self.llm.chat_oai(conversations=[{
@@ -336,7 +336,7 @@ class ImageToPage:
              }], llm_config={**extra_llm_config})
              new_code = t[0].output
 
-             with open(self.args.target_file, "w") as f:
+             with open(self.args.target_file, "w", encoding="utf-8") as f:
                  f.write(new_code)
 
              self.write_code(new_code, html_path)
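Every hunk in this file makes the same change: pinning `encoding="utf-8"` instead of relying on the platform default, which `open()` derives from `locale.getpreferredencoding(False)` and which is a legacy codec such as cp936 on Chinese-locale Windows. A minimal stdlib illustration of the difference:

```python
import locale

html = "<p>你好, world</p>"

# What an unpinned open() would use; on some Windows systems this is a
# legacy codec that cannot encode the text above (UnicodeEncodeError).
print(locale.getpreferredencoding(False))

# The diff's fix: deterministic UTF-8 regardless of the platform locale.
with open("page.html", "w", encoding="utf-8") as f:
    f.write(html)
```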
autocoder/common/index_import_export.py CHANGED
@@ -3,6 +3,9 @@ import json
  import shutil
  from loguru import logger
  from autocoder.common.printer import Printer
+ from autocoder.common.result_manager import ResultManager
+
+ result_manager = ResultManager()
 
 
  def export_index(project_root: str, export_path: str) -> bool:
@@ -22,11 +25,11 @@ def export_index(project_root: str, export_path: str) -> bool:
          if not os.path.exists(index_path):
              printer.print_in_terminal("index_not_found", path=index_path)
              return False
-
+
          # Read and convert paths
-         with open(index_path, "r") as f:
+         with open(index_path, "r", encoding="utf-8") as f:
              index_data = json.load(f)
-
+
          # Convert absolute paths to relative
          converted_data = {}
          for abs_path, data in index_data.items():
@@ -35,21 +38,29 @@ def export_index(project_root: str, export_path: str) -> bool:
                  data["module_name"] = rel_path
                  converted_data[rel_path] = data
              except ValueError:
-                 printer.print_in_terminal("index_convert_path_fail", path=abs_path)
+                 printer.print_in_terminal(
+                     "index_convert_path_fail", path=abs_path)
                  converted_data[abs_path] = data
-
+
          # Write to export location
          export_file = os.path.join(export_path, "index.json")
          os.makedirs(export_path, exist_ok=True)
-         with open(export_file, "w") as f:
+         with open(export_file, "w", encoding="utf-8") as f:
              json.dump(converted_data, f, indent=2)
-
+         printer.print_in_terminal("index_export_success", path=export_file)
+         result_manager.add_result(content=printer.get_message_from_key_with_format("index_export_success", path=export_file), meta={"action": "index_export", "input": {
+             "path": export_file
+         }})
          return True
-
+
      except Exception as e:
          printer.print_in_terminal("index_error", error=str(e))
+         result_manager.add_result(content=printer.get_message_from_key_with_format("index_error", error=str(e)), meta={"action": "index_export", "input": {
+             "path": export_file
+         }})
          return False
 
+
  def import_index(project_root: str, import_path: str) -> bool:
      printer = Printer()
      """
@@ -67,11 +78,11 @@ def import_index(project_root: str, import_path: str) -> bool:
          if not os.path.exists(import_file):
              printer.print_in_terminal("index_not_found", path=import_file)
              return False
-
+
          # Read and convert paths
-         with open(import_file, "r") as f:
+         with open(import_file, "r", encoding="utf-8") as f:
              index_data = json.load(f)
-
+
          # Convert relative paths to absolute
          converted_data = {}
          for rel_path, data in index_data.items():
@@ -80,22 +91,31 @@ def import_index(project_root: str, import_path: str) -> bool:
                  data["module_name"] = abs_path
                  converted_data[abs_path] = data
              except Exception:
-                 printer.print_in_terminal("index_convert_path_fail", path=rel_path)
+                 printer.print_in_terminal(
+                     "index_convert_path_fail", path=rel_path)
                  converted_data[rel_path] = data
-
+
          # Backup existing index
          index_path = os.path.join(project_root, ".auto-coder", "index.json")
          if os.path.exists(index_path):
              backup_path = index_path + ".bak"
              shutil.copy2(index_path, backup_path)
              printer.print_in_terminal("index_backup_success", path=backup_path)
-
+
          # Write new index
-         with open(index_path, "w") as f:
+         with open(index_path, "w", encoding="utf-8") as f:
              json.dump(converted_data, f, indent=2)
-
-         return True
 
+         printer.print_in_terminal("index_import_success", path=index_path)
+         result_manager.add_result(content=printer.get_message_from_key_with_format("index_import_success", path=index_path), meta={"action": "index_import", "input": {
+             "path": index_path
+         }})
+
+         return True
+
      except Exception as e:
          printer.print_in_terminal("index_error", error=str(e))
-         return False
+         result_manager.add_result(content=printer.get_message_from_key_with_format("index_error", error=str(e)), meta={"action": "index_import", "input": {
+             "path": index_path
+         }})
+         return False
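Aside from the UTF-8 pinning and the new `result_manager` reporting, the export/import pair's core mechanism is unchanged: `.auto-coder/index.json` stays portable between machines because module paths are round-tripped through `os.path.relpath` on export and `os.path.join` on import. A sketch with hypothetical roots:

```python
import os

source_root = "/home/alice/myproj"   # hypothetical checkout on machine A
target_root = "/srv/ci/myproj"       # hypothetical checkout on machine B

abs_path = os.path.join(source_root, "src", "app.py")

rel_path = os.path.relpath(abs_path, source_root)   # export: "src/app.py"
restored = os.path.join(target_root, rel_path)      # import: re-anchored
print(rel_path, "->", restored)
```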
autocoder/common/mcp_hub.py CHANGED
@@ -116,7 +116,7 @@ class McpHub:
      def _write_default_settings(self):
          """Write default MCP settings file"""
          default_settings = {"mcpServers": {}}
-         with open(self.settings_path, "w") as f:
+         with open(self.settings_path, "w", encoding="utf-8") as f:
              json.dump(default_settings, f, indent=2)
 
      async def add_server_config(self, name: str, config: Dict[str, Any]) -> None:
@@ -129,7 +129,7 @@ class McpHub:
          try:
              settings = self._read_settings()
              settings["mcpServers"][name] = config
-             with open(self.settings_path, "w") as f:
+             with open(self.settings_path, "w", encoding="utf-8") as f:
                  json.dump(settings, f, indent=2, ensure_ascii=False)
              await self.initialize()
              logger.info(f"Added/updated MCP server config: {name}")
@@ -148,7 +148,7 @@ class McpHub:
          settings = self._read_settings()
          if name in settings["mcpServers"]:
              del settings["mcpServers"][name]
-             with open(self.settings_path, "w") as f:
+             with open(self.settings_path, "w", encoding="utf-8") as f:
                  json.dump(settings, f, indent=2, ensure_ascii=False)
              logger.info(f"Removed MCP server config: {name}")
              await self.initialize()
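For orientation, the settings file McpHub rewrites here is a plain JSON map under one top-level `mcpServers` key, and `add_server_config` stores whatever dict it is given verbatim. A hedged example of the resulting content (the server name and its fields are illustrative, following common MCP configuration convention rather than anything this code enforces):

```python
import json

settings = {
    "mcpServers": {
        "filesystem": {  # hypothetical server entry
            "command": "npx",
            "args": ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"],
        }
    }
}
# Mirrors the diff's write: indent=2, ensure_ascii=False, UTF-8 file.
print(json.dumps(settings, indent=2, ensure_ascii=False))
```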
autocoder/common/mcp_server.py CHANGED
@@ -80,7 +80,7 @@ def get_mcp_external_servers() -> List[McpExternalServer]:
      if os.path.exists(cache_file):
          cache_time = os.path.getmtime(cache_file)
          if time.time() - cache_time < 3600:  # 1 hour cache
-             with open(cache_file, "r") as f:
+             with open(cache_file, "r", encoding="utf-8") as f:
                  raw_data = json.load(f)
                  return [McpExternalServer(**item) for item in raw_data]
 
@@ -91,7 +91,7 @@ def get_mcp_external_servers() -> List[McpExternalServer]:
      response = requests.get(url)
      if response.status_code == 200:
          raw_data = response.json()
-         with open(cache_file, "w") as f:
+         with open(cache_file, "w", encoding="utf-8") as f:
              json.dump(raw_data, f)
          return [McpExternalServer(**item) for item in raw_data]
      return []
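The surrounding logic in `get_mcp_external_servers` is a straightforward mtime-based file cache around an HTTP fetch; only the UTF-8 pinning changed in this release. The pattern, reduced to a self-contained sketch (the `fetch` callable and cache path are placeholders):

```python
import json
import os
import time

CACHE_TTL_SECONDS = 3600  # the same 1-hour window used in the diff

def cached_fetch(cache_file, fetch):
    # Serve from the cache while the file is younger than the TTL.
    if os.path.exists(cache_file) and time.time() - os.path.getmtime(cache_file) < CACHE_TTL_SECONDS:
        with open(cache_file, "r", encoding="utf-8") as f:
            return json.load(f)
    # Otherwise refresh from the network and rewrite the cache.
    data = fetch()
    with open(cache_file, "w", encoding="utf-8") as f:
        json.dump(data, f)
    return data
```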