auto-coder 0.1.264__py3-none-any.whl → 0.1.265__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of auto-coder might be problematic.
Files changed (50)
  1. {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/METADATA +1 -1
  2. {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/RECORD +50 -48
  3. autocoder/agent/planner.py +4 -4
  4. autocoder/auto_coder.py +26 -21
  5. autocoder/auto_coder_server.py +7 -7
  6. autocoder/chat_auto_coder.py +150 -49
  7. autocoder/commands/auto_command.py +81 -4
  8. autocoder/commands/tools.py +48 -50
  9. autocoder/common/__init__.py +0 -1
  10. autocoder/common/auto_coder_lang.py +37 -3
  11. autocoder/common/code_auto_generate.py +3 -3
  12. autocoder/common/code_auto_generate_diff.py +3 -6
  13. autocoder/common/code_auto_generate_editblock.py +3 -3
  14. autocoder/common/code_auto_generate_strict_diff.py +3 -3
  15. autocoder/common/code_auto_merge_diff.py +2 -2
  16. autocoder/common/code_auto_merge_editblock.py +1 -1
  17. autocoder/common/code_auto_merge_strict_diff.py +3 -3
  18. autocoder/common/command_completer.py +3 -0
  19. autocoder/common/command_generator.py +24 -8
  20. autocoder/common/command_templates.py +2 -2
  21. autocoder/common/conf_import_export.py +105 -0
  22. autocoder/common/conf_validator.py +1 -1
  23. autocoder/common/files.py +41 -2
  24. autocoder/common/image_to_page.py +11 -11
  25. autocoder/common/index_import_export.py +38 -18
  26. autocoder/common/mcp_hub.py +3 -3
  27. autocoder/common/mcp_server.py +2 -2
  28. autocoder/common/shells.py +254 -13
  29. autocoder/common/stats_panel.py +126 -0
  30. autocoder/dispacher/actions/action.py +6 -18
  31. autocoder/dispacher/actions/copilot.py +2 -2
  32. autocoder/dispacher/actions/plugins/action_regex_project.py +1 -3
  33. autocoder/dispacher/actions/plugins/action_translate.py +1 -1
  34. autocoder/index/index.py +5 -5
  35. autocoder/models.py +2 -2
  36. autocoder/pyproject/__init__.py +5 -5
  37. autocoder/rag/cache/byzer_storage_cache.py +4 -4
  38. autocoder/rag/cache/file_monitor_cache.py +2 -2
  39. autocoder/rag/cache/simple_cache.py +4 -4
  40. autocoder/rag/long_context_rag.py +2 -2
  41. autocoder/regexproject/__init__.py +3 -2
  42. autocoder/suffixproject/__init__.py +3 -2
  43. autocoder/tsproject/__init__.py +3 -2
  44. autocoder/utils/conversation_store.py +1 -1
  45. autocoder/utils/operate_config_api.py +3 -3
  46. autocoder/version.py +1 -1
  47. {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/LICENSE +0 -0
  48. {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/WHEEL +0 -0
  49. {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/entry_points.txt +0 -0
  50. {auto_coder-0.1.264.dist-info → auto_coder-0.1.265.dist-info}/top_level.txt +0 -0
autocoder/commands/tools.py CHANGED
@@ -27,6 +27,7 @@ from autocoder.utils.queue_communicate import (
 )
 import sys
 import io
+from autocoder.common import files as files_utils
 
 @byzerllm.prompt()
 def detect_rm_command(command: str) -> Bool:
@@ -286,34 +287,33 @@ class AutoCommandTools:
 
         result = []
         try:
-            with open(absolute_path, 'r', encoding='utf-8') as f:
-                lines = f.readlines()
+
+            lines = files_utils.read_lines(absolute_path)
+            # Find all lines containing the keyword
+            keyword_lines = []
+            for i, line in enumerate(lines):
+                if keyword.lower() in line.lower():
+                    keyword_lines.append(i)
+
+            # Process each keyword line and its surrounding range
+            processed_ranges = set()
+            for line_num in keyword_lines:
+                # Calculate range boundaries
+                start = max(0, line_num - before_size)
+                end = min(len(lines), line_num + after_size + 1)
 
-            # Find all lines containing the keyword
-            keyword_lines = []
-            for i, line in enumerate(lines):
-                if keyword.lower() in line.lower():
-                    keyword_lines.append(i)
+                # Check if this range overlaps with any previously processed range
+                range_key = (start, end)
+                if range_key in processed_ranges:
+                    continue
 
-            # Process each keyword line and its surrounding range
-            processed_ranges = set()
-            for line_num in keyword_lines:
-                # Calculate range boundaries
-                start = max(0, line_num - before_size)
-                end = min(len(lines), line_num + after_size + 1)
-
-                # Check if this range overlaps with any previously processed range
-                range_key = (start, end)
-                if range_key in processed_ranges:
-                    continue
-
-                processed_ranges.add(range_key)
-
-                # Format the content block
-                content = f"##File: {absolute_path}\n"
-                content += f"##Line: {start+1}-{end}\n\n"
-                content += "".join(lines[start:end])
-                result.append(content)
+                processed_ranges.add(range_key)
+
+                # Format the content block
+                content = f"##File: {absolute_path}\n"
+                content += f"##Line: {start+1}-{end}\n\n"
+                content += "".join(lines[start:end])
+                result.append(content)
 
         except Exception as e:
             v = f"Error reading file {absolute_path}: {str(e)}"
@@ -403,23 +403,22 @@ class AutoCommandTools:
                 if path in os.path.join(root, file):
                     absolute_path = os.path.join(root, file)
                     break
-
-            with open(absolute_path, "r", encoding="utf-8") as f:
-                if path in file_line_ranges:
-                    # Read specific line ranges
-                    lines = f.readlines()
-                    filtered_lines = []
-                    for start, end in file_line_ranges[path]:
-                        # Adjust for 0-based indexing
-                        start = max(0, start - 1)
-                        end = min(len(lines), end)
-                        content = "".join(lines[start:end])
-                        filtered_lines.extend(f"##File: {absolute_path}\n##Line: {start}-{end}\n\n{content}")
-                    source_code = "".join(filtered_lines)
-                else:
-                    # Read entire file if no range specified
-                    content = f.read()
-                    source_code = f"##File: {absolute_path}\n\n{content}"
+
+            if path in file_line_ranges:
+                # Read specific line ranges
+                lines = files_utils.read_lines(absolute_path)
+                filtered_lines = []
+                for start, end in file_line_ranges[path]:
+                    # Adjust for 0-based indexing
+                    start = max(0, start - 1)
+                    end = min(len(lines), end)
+                    content = "".join(lines[start:end])
+                    filtered_lines.extend(f"##File: {absolute_path}\n##Line: {start}-{end}\n\n{content}")
+                source_code = "".join(filtered_lines)
+            else:
+                # Read entire file if no range specified
+                content = files_utils.read_file(absolute_path)
+                source_code = f"##File: {absolute_path}\n\n{content}"
 
             sc = SourceCode(module_name=absolute_path, source_code=source_code)
             source_code_str += f"{sc.source_code}\n\n"
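
The line-range branch above turns a caller-supplied 1-based inclusive (start, end) pair into a 0-based Python slice before joining the lines. A small sketch of just that adjustment (names and sample data are illustrative):

```python
from typing import List

def slice_line_range(lines: List[str], start: int, end: int) -> str:
    # 1-based inclusive range -> 0-based slice, mirroring the adjustment above
    start = max(0, start - 1)      # 1-based -> 0-based start
    end = min(len(lines), end)     # clamp; the slice end is already exclusive
    return "".join(lines[start:end])

lines = ["a\n", "b\n", "c\n", "d\n"]
print(slice_line_range(lines, 2, 3))   # prints lines 2-3: "b" and "c"
```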
@@ -510,13 +509,12 @@ class AutoCommandTools:
             for file in files:
                 file_path = os.path.join(root, file)
                 try:
-                    with open(file_path, "r", encoding="utf-8") as f:
-                        content = f.read()
-                        if keyword.lower() in content.lower():
-                            matched_files.append(file_path)
-                            # Limit to first 10 matches
-                            if len(matched_files) >= 10:
-                                break
+                    content = files_utils.read_file(file_path)
+                    if keyword.lower() in content.lower():
+                        matched_files.append(file_path)
+                        # Limit to first 10 matches
+                        if len(matched_files) >= 10:
+                            break
                 except Exception:
                     # Skip files that can't be read
                     pass
autocoder/common/__init__.py CHANGED
@@ -6,7 +6,6 @@ import os
 import time
 from typing import List, Dict, Any, Optional, Union
 
-
 class SourceCode(pydantic.BaseModel):
     module_name: str
     source_code: str
autocoder/common/auto_coder_lang.py CHANGED
@@ -3,6 +3,7 @@ from byzerllm.utils import format_str_jinja2
 
 MESSAGES = {
     "en": {
+        "invalid_file_pattern": "Invalid file pattern: {{file_pattern}}. e.g. regex://.*/package-lock\\.json",
         "config_validation_error": "Config validation error: {{error}}",
         "invalid_boolean_value": "Value '{{value}}' is not a valid boolean(true/false)",
         "invalid_integer_value": "Value '{{value}}' is not a valid integer",
@@ -139,9 +140,40 @@ MESSAGES = {
         "conversation_pruning_start": "⚠️ Conversation pruning started, total tokens: {{total_tokens}}, safe zone: {{safe_zone}}",
         "invalid_file_number": "⚠️ Invalid file number {{file_number}}, total files: {{total_files}}",
         "all_merge_results_failed": "⚠️ All merge attempts failed, returning first candidate",
-        "only_one_merge_result_success": "✅ Only one merge result succeeded, returning that candidate"
-    },
+        "only_one_merge_result_success": "✅ Only one merge result succeeded, returning that candidate",
+        "conf_import_success": "Successfully imported configuration: {{path}}",
+        "conf_export_success": "Successfully exported configuration: {{path}}",
+        "conf_import_error": "Error importing configuration: {{error}}",
+        "conf_export_error": "Error exporting configuration: {{error}}",
+        "conf_import_invalid_format": "Invalid import configuration format, expected 'key:value'",
+        "conf_export_invalid_format": "Invalid export configuration format, expected 'key:value'",
+        "conf_import_file_not_found": "Import configuration file not found: {{file_path}}",
+        "conf_export_file_not_found": "Export configuration file not found: {{file_path}}",
+        "conf_import_file_empty": "Import configuration file is empty: {{file_path}}",
+        "conf_export_file_empty": "Export configuration file is empty: {{file_path}}",
+        "generated_shell_script": "Generated Shell Script",
+        "confirm_execute_shell_script": "Do you want to execute this shell script?",
+        "shell_script_not_executed": "Shell script was not executed",
+        "conf_not_found": "Configuration file not found: {{path}}",
+        "index_export_success": "Index exported successfully: {{path}}",
+        "index_import_success": "Index imported successfully: {{path}}",
+    },
     "zh": {
+        "invalid_file_pattern": "无效的文件模式: {{file_pattern}}. 例如: regex://.*/package-lock\\.json",
+        "conf_not_found": "未找到配置文件: {{path}}",
+        "conf_import_success": "成功导入配置: {{path}}",
+        "conf_export_success": "成功导出配置: {{path}}",
+        "conf_import_error": "导入配置出错: {{error}}",
+        "conf_export_error": "导出配置出错: {{error}}",
+        "conf_import_invalid_format": "导入配置格式无效, 应为 'key:value' 格式",
+        "conf_export_invalid_format": "导出配置格式无效, 应为 'key:value' 格式",
+        "conf_import_file_not_found": "未找到导入配置文件: {{file_path}}",
+        "conf_export_file_not_found": "未找到导出配置文件: {{file_path}}",
+        "conf_import_file_empty": "导入配置文件为空: {{file_path}}",
+        "conf_export_file_empty": "导出配置文件为空: {{file_path}}",
+        "generated_shell_script": "生成的 Shell 脚本",
+        "confirm_execute_shell_script": "您要执行此 shell 脚本吗?",
+        "shell_script_not_executed": "Shell 脚本未执行",
        "config_validation_error": "配置验证错误: {{error}}",
        "invalid_boolean_value": "值 '{{value}}' 不是有效的布尔值(true/false)",
        "invalid_integer_value": "值 '{{value}}' 不是有效的整数",
@@ -276,7 +308,9 @@ MESSAGES = {
        "conversation_pruning_start": "⚠️ 对话长度 {{total_tokens}} tokens 超过安全阈值 {{safe_zone}},开始修剪对话。",
        "invalid_file_number": "⚠️ 无效的文件编号 {{file_number}},总文件数为 {{total_files}}",
        "all_merge_results_failed": "⚠️ 所有合并尝试都失败,返回第一个候选",
-        "only_one_merge_result_success": "✅ 只有一个合并结果成功,返回该候选"
+        "only_one_merge_result_success": "✅ 只有一个合并结果成功,返回该候选",
+        "index_export_success": "索引导出成功: {{path}}",
+        "index_import_success": "索引导入成功: {{path}}",
     }}
 
 
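All of the new message keys above follow the file's existing convention: per-language dictionaries whose values contain Jinja2-style {{placeholder}} slots that are filled in when a message is printed. A minimal sketch of that lookup-and-render pattern, using the jinja2 package directly (the helper below is illustrative, not the module's actual API):

```python
from jinja2 import Template

MESSAGES = {
    "en": {"conf_export_success": "Successfully exported configuration: {{path}}"},
    "zh": {"conf_export_success": "成功导出配置: {{path}}"},
}

def render_message(key: str, lang: str = "en", **kwargs) -> str:
    # Look up the per-language template and fill its {{...}} placeholders.
    return Template(MESSAGES[lang][key]).render(**kwargs)

print(render_message("conf_export_success", path="/tmp/backup/conf.json"))
# Successfully exported configuration: /tmp/backup/conf.json
```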
autocoder/common/code_auto_generate.py CHANGED
@@ -197,7 +197,7 @@ class CodeAutoGenerate:
             instruction=query, content=source_content
         )
 
-        with open(self.args.target_file, "w") as file:
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
             file.write(init_prompt)
 
         conversations = []
@@ -298,7 +298,7 @@ class CodeAutoGenerate:
 
         conversations = [{"role": "user", "content": init_prompt}]
 
-        with open(self.args.target_file, "w") as file:
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
             file.write(init_prompt)
 
         t = self.llm.chat_oai(conversations=conversations, llm_config=llm_config)
@@ -320,7 +320,7 @@ class CodeAutoGenerate:
 
         conversations.append({"role": "user", "content": "继续"})
 
-        with open(self.args.target_file, "w") as file:
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
             file.write("继续")
 
         t = self.llm.chat_oai(conversations=conversations, llm_config=llm_config)
autocoder/common/code_auto_generate_diff.py CHANGED
@@ -315,10 +315,7 @@ class CodeAutoGenerateDiff:
         elif self.args.template == "auto_implement":
             init_prompt = self.auto_implement_function.prompt(
                 instruction=query, content=source_content
-            )
-
-        with open(self.args.target_file, "w") as file:
-            file.write(init_prompt)
+            )
 
         conversations = []
 
@@ -447,7 +444,7 @@ class CodeAutoGenerateDiff:
         # conversations.append({"role": "system", "content": sys_prompt.prompt()})
         conversations.append({"role": "user", "content": init_prompt})
 
-        with open(self.args.target_file, "w") as file:
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
             file.write(init_prompt)
 
         code_llm = self.llms[0]
@@ -467,7 +464,7 @@ class CodeAutoGenerateDiff:
 
         conversations.append({"role": "user", "content": "继续"})
 
-        with open(self.args.target_file, "w") as file:
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
             file.write("继续")
 
         t = code_llm.chat_oai(
autocoder/common/code_auto_generate_editblock.py CHANGED
@@ -418,7 +418,7 @@ class CodeAutoGenerateEditBlock:
             instruction=query, content=source_content
         )
 
-        with open(self.args.target_file, "w") as file:
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
             file.write(init_prompt)
 
         conversations = []
@@ -538,7 +538,7 @@ class CodeAutoGenerateEditBlock:
         # conversations.append({"role": "system", "content": sys_prompt.prompt()})
         conversations.append({"role": "user", "content": init_prompt})
 
-        with open(self.args.target_file, "w") as file:
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
             file.write(init_prompt)
 
         code_llm = self.llms[0]
@@ -558,7 +558,7 @@ class CodeAutoGenerateEditBlock:
 
         conversations.append({"role": "user", "content": "继续"})
 
-        with open(self.args.target_file, "w") as file:
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
             file.write("继续")
 
         t = code_llm.chat_oai(
autocoder/common/code_auto_generate_strict_diff.py CHANGED
@@ -287,7 +287,7 @@ class CodeAutoGenerateStrictDiff:
             instruction=query, content=source_content
         )
 
-        with open(self.args.target_file, "w") as file:
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
             file.write(init_prompt)
 
         conversations = []
@@ -417,7 +417,7 @@ class CodeAutoGenerateStrictDiff:
         # conversations.append({"role": "system", "content": sys_prompt.prompt()})
         conversations.append({"role": "user", "content": init_prompt})
 
-        with open(self.args.target_file, "w") as file:
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
             file.write(init_prompt)
 
         code_llm = self.llms[0]
@@ -437,7 +437,7 @@ class CodeAutoGenerateStrictDiff:
 
         conversations.append({"role": "user", "content": "继续"})
 
-        with open(self.args.target_file, "w") as file:
+        with open(self.args.target_file, "w",encoding="utf-8") as file:
             file.write("继续")
 
         t = code_llm.chat_oai(
autocoder/common/code_auto_merge_diff.py CHANGED
@@ -462,7 +462,7 @@ class CodeAutoMergeDiff:
             full_path = self.abs_root_path(path)
 
             if not os.path.exists(full_path):
-                with open(full_path, "w",encoding="utf-8") as f:
+                with open(full_path, "w") as f:
                     f.write("")
 
             content = FileUtils.read_file(full_path)
@@ -531,7 +531,7 @@ class CodeAutoMergeDiff:
     def _merge_code(self, content: str,force_skip_git:bool=False):
         total = 0
 
-        file_content = open(self.args.file).read()
+        file_content = FileUtils.read_file(self.args.file)
         md5 = hashlib.md5(file_content.encode('utf-8')).hexdigest()
         # get the file name
         file_name = os.path.basename(self.args.file)
autocoder/common/code_auto_merge_editblock.py CHANGED
@@ -289,7 +289,7 @@ class CodeAutoMergeEditBlock:
         )
 
     def _merge_code(self, content: str, force_skip_git: bool = False):
-        file_content = open(self.args.file).read()
+        file_content = FileUtils.read_file(self.args.file)
         md5 = hashlib.md5(file_content.encode("utf-8")).hexdigest()
         file_name = os.path.basename(self.args.file)
 
autocoder/common/code_auto_merge_strict_diff.py CHANGED
@@ -10,6 +10,7 @@ import hashlib
 from pathlib import Path
 from autocoder.common.types import CodeGenerateResult, MergeCodeWithoutEffect
 from autocoder.common.code_modification_ranker import CodeModificationRanker
+from autocoder.common import files as FileUtils
 
 class PathAndCode(pydantic.BaseModel):
     path: str
@@ -195,8 +196,7 @@ class CodeAutoMergeStrictDiff:
                 continue
 
             if full_path not in file_content_mapping:
-                with open(full_path, "r") as f:
-                    file_content_mapping[full_path] = f.read()
+                file_content_mapping[full_path] = FileUtils.read_file(full_path)
 
         try:
             import patch
@@ -224,7 +224,7 @@ class CodeAutoMergeStrictDiff:
     def _merge_code(self, content: str, force_skip_git: bool = False):
         total = 0
 
-        file_content = open(self.args.file).read()
+        file_content = FileUtils.read_file(self.args.file)
         md5 = hashlib.md5(file_content.encode('utf-8')).hexdigest()
         # get the file name
         file_name = os.path.basename(self.args.file)
autocoder/common/command_completer.py CHANGED
@@ -43,6 +43,9 @@ COMMANDS = {
         "/output_price": "",
     },
     "/auto": {
+    },
+    "/shell": {
+        "/chat": "",
     }
 }
 
autocoder/common/command_generator.py CHANGED
@@ -4,6 +4,7 @@ from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
 from autocoder.common import detect_env
 from autocoder.common import shells
 from autocoder.common.printer import Printer
+from typing import Dict,Union
 
 @byzerllm.prompt()
 def _generate_shell_script(user_input: str) -> str:
@@ -14,6 +15,11 @@ def _generate_shell_script(user_input: str) -> str:
     Python版本: {{ env_info.python_version }}
     终端类型: {{ env_info.shell_type }}
     终端编码: {{ env_info.shell_encoding }}
+
+    {%- if shell_type %}
+    脚本类型:{{ shell_type }}
+    {%- endif %}
+
     {%- if env_info.conda_env %}
     Conda环境: {{ env_info.conda_env }}
     {%- endif %}
@@ -21,29 +27,39 @@ def _generate_shell_script(user_input: str) -> str:
     虚拟环境: {{ env_info.virtualenv }}
     {%- endif %}
 
-    根据用户的输入以及当前的操作系统和Shell类型生成合适的 shell 脚本,注意只能生成一个shell脚本,不要生成多个。
+    根据用户的输入以及当前的操作系统和终端类型以及脚本类型生成脚本,
+    注意只能生成一个shell脚本,不要生成多个。
 
     用户输入: {{ user_input }}
 
-    请生成一个适当的 shell 脚本来执行用户的请求。确保脚本是安全的,并且可以在当前Shell环境中运行。
+    请生成一个适当的脚本来执行用户的请求。确保脚本是安全的,并且可以在当前Shell环境中运行。
     脚本应该包含必要的注释来解释每个步骤。
     脚本内容请用如下方式返回:
 
-    ```shell
-    # 你的 shell 脚本内容
+    ```script
+    # 你的 script 脚本内容
     ```
     """
-    env_info = detect_env()
+    env_info = detect_env()
+    shell_type = "bash"
+    if shells.is_running_in_cmd():
+        shell_type = "cmd"
+    elif shells.is_running_in_powershell():
+        shell_type = "powershell"
     return {
         "env_info": env_info,
-        "shell_type": shells.get_terminal_name(),
+        "shell_type": shell_type,
         "shell_encoding": shells.get_terminal_encoding()
     }
 
 
-def generate_shell_script(user_input: str, llm: byzerllm.ByzerLLM) -> str:
+def generate_shell_script(user_input: str, llm: Union[byzerllm.ByzerLLM,byzerllm.SimpleByzerLLM]) -> str:
     # 获取 prompt 内容
     prompt = _generate_shell_script.prompt(user_input=user_input)
+    if llm.get_sub_client("chat_model"):
+        shell_llm = llm.get_sub_client("chat_model")
+    else:
+        shell_llm = llm
 
     # 构造对话上下文
     conversations = [{"role": "user", "content": prompt}]
@@ -52,7 +68,7 @@ def generate_shell_script(user_input: str, llm: byzerllm.ByzerLLM) -> str:
     printer = Printer()
     title = printer.get_message_from_key("generating_shell_script")
     result, _ = stream_out(
-        llm.stream_chat_oai(conversations=conversations, delta_mode=True),
+        shell_llm.stream_chat_oai(conversations=conversations, delta_mode=True),
         model_name=llm.default_model_name,
         title=title
    )
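
Two behavioural changes are bundled into the command_generator diff above: the script dialect is now picked from the running terminal (cmd, PowerShell, or bash as the default) and fed into the prompt, and streaming goes through a dedicated chat_model sub-client when the LLM wrapper exposes one, falling back to the main client otherwise. A standalone sketch of both fallbacks (the FakeLLM class and its values are illustrative, not the byzerllm API):

```python
def pick_shell_type(in_cmd: bool, in_powershell: bool) -> str:
    # Same precedence as the new code: cmd, then powershell, else bash.
    if in_cmd:
        return "cmd"
    if in_powershell:
        return "powershell"
    return "bash"

class FakeLLM:
    """Stand-in for the LLM wrapper; only models the sub-client lookup."""
    def __init__(self, sub_clients=None):
        self._subs = sub_clients or {}

    def get_sub_client(self, name):
        return self._subs.get(name)   # None when no dedicated model is configured

llm = FakeLLM()
shell_llm = llm.get_sub_client("chat_model") or llm   # same fallback as generate_shell_script
print(pick_shell_type(False, True))                   # -> "powershell"
```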
autocoder/common/command_templates.py CHANGED
@@ -139,7 +139,7 @@ def create_actions(source_dir:str,params:Dict[str,str]):
         "000_example": base_000_example.prompt(),
     }
     init_file_path = os.path.join(source_dir, "actions", "101_current_work.yml")
-    with open(init_file_path, "w") as f:
+    with open(init_file_path, "w", encoding="utf-8") as f:
         f.write(init_command_template.prompt(source_dir=source_dir))
 
     for k,v in mapping.items():
@@ -152,7 +152,7 @@ def create_actions(source_dir:str,params:Dict[str,str]):
         if k == "000_example":
             file_path = os.path.join(source_dir, "actions", f"{k}.yml")
 
-            with open(file_path, "w") as f:
+            with open(file_path, "w", encoding="utf-8") as f:
                 f.write(v)
 
 @byzerllm.prompt()
autocoder/common/conf_import_export.py ADDED
@@ -0,0 +1,105 @@
+import os
+import json
+import shutil
+from loguru import logger
+from autocoder.common.printer import Printer
+from autocoder.common.result_manager import ResultManager
+
+result_manager = ResultManager()
+
+def export_conf(project_root: str, export_path: str) -> bool:
+    printer = Printer()
+    """
+    Export conf from memory.json to a specified directory
+
+    Args:
+        project_root: Project root directory
+        export_path: Path to export the conf file
+
+    Returns:
+        bool: True if successful, False otherwise
+    """
+    project_root = os.path.abspath(project_root) or os.getcwd()
+    try:
+        memory_path = os.path.join(project_root, ".auto-coder", "plugins", "chat-auto-coder", "memory.json")
+        if not os.path.exists(memory_path):
+            printer.print_in_terminal("conf_not_found", path=memory_path)
+            return False
+
+        # Read and extract conf
+        with open(memory_path, "r",encoding="utf-8") as f:
+            memory_data = json.load(f)
+
+        conf_data = memory_data.get("conf", {})
+
+        # Write to export location
+        export_file = os.path.join(export_path, "conf.json")
+        os.makedirs(export_path, exist_ok=True)
+        with open(export_file, "w",encoding="utf-8") as f:
+            json.dump(conf_data, f, indent=2)
+        printer.print_in_terminal("conf_export_success", path=export_file)
+        result_manager.add_result(content=printer.get_message_from_key_with_format("conf_export_success", path=export_file), meta={"action": "conf_export", "input": {
+            "path": export_file
+        }})
+        return True
+
+    except Exception as e:
+        result_manager.add_result(content=printer.get_message_from_key_with_format("conf_export_error", error=str(e)), meta={"action": "conf_export", "input": {
+            "path": export_file
+        }})
+        printer.print_in_terminal("conf_export_error", error=str(e))
+        return False
+
+
+def import_conf(project_root: str, import_path: str) -> bool:
+    project_root = os.path.abspath(project_root) or os.getcwd()
+    printer = Printer()
+    """
+    Import conf from a specified directory into memory.json
+
+    Args:
+        project_root: Project root directory
+        import_path: Path containing the conf file to import
+
+    Returns:
+        bool: True if successful, False otherwise
+    """
+    try:
+        import_file = os.path.join(import_path, "conf.json")
+        if not os.path.exists(import_file):
+            printer.print_in_terminal("conf_not_found", path=import_file)
+            return False
+
+        # Read conf file
+        with open(import_file, "r",encoding="utf-8") as f:
+            conf_data = json.load(f)
+
+        # Backup existing memory
+        memory_path = os.path.join(project_root, ".auto-coder", "plugins", "chat-auto-coder", "memory.json")
+        if os.path.exists(memory_path):
+            backup_path = memory_path + ".bak"
+            shutil.copy2(memory_path, backup_path)
+            printer.print_in_terminal("conf_backup_success", path=backup_path)
+
+        # Update conf in memory
+        with open(memory_path, "r",encoding="utf-8") as f:
+            memory_data = json.load(f)
+
+        memory_data["conf"] = conf_data
+
+        # Write updated memory
+        with open(memory_path, "w",encoding="utf-8") as f:
+            json.dump(memory_data, f, indent=2)
+
+        printer.print_in_terminal("conf_import_success", path=memory_path)
+        result_manager.add_result(content=printer.get_message_from_key_with_format("conf_import_success", path=memory_path), meta={"action": "conf_import", "input": {
+            "path": memory_path
+        }})
+        return True
+
+    except Exception as e:
+        result_manager.add_result(content=printer.get_message_from_key_with_format("conf_import_error", error=str(e)), meta={"action": "conf_import", "input": {
+            "path": memory_path
+        }})
+        printer.print_in_terminal("conf_import_error", error=str(e))
+        return False
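
Taken together, export_conf writes the conf section of .auto-coder/plugins/chat-auto-coder/memory.json out as conf.json under the export directory, and import_conf reads such a file back, backing up the existing memory.json to memory.json.bak first. A short usage sketch (the paths are illustrative):

```python
from autocoder.common.conf_import_export import export_conf, import_conf

# Dump the current project's conf dict to /tmp/ac-conf/conf.json
if export_conf(project_root=".", export_path="/tmp/ac-conf"):
    # Later, or on another checkout: restore it into memory.json
    import_conf(project_root=".", import_path="/tmp/ac-conf")
```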
autocoder/common/conf_validator.py CHANGED
@@ -142,7 +142,7 @@ class ConfigValidator:
         }
 
     @classmethod
-    def validate(cls, key: str, value: Any, product_mode: str) -> Any:
+    def validate(cls, key: str, value: Any, product_mode: str) -> Any:
         # 获取配置规范
         spec = cls.CONFIG_SPEC.get(key)
         if not spec:
autocoder/common/files.py CHANGED
@@ -1,5 +1,5 @@
 from autocoder.common.auto_coder_lang import get_message_with_format
-from typing import List, Dict, Union
+from typing import List, Dict, Union, Generator, Tuple
 
 def read_file(file_path):
     """Read a file with automatic encoding detection.
@@ -30,6 +30,45 @@ def read_file(file_path):
                                         file_path=file_path,
                                         encodings=", ".join(encodings)))
 
+def read_lines(file_path:str):
+    encodings = ['utf-8', 'gbk', 'utf-16', 'latin-1']
+    for encoding in encodings:
+        try:
+            with open(file_path, 'r', encoding=encoding) as f:
+                return f.readlines()
+        except UnicodeDecodeError:
+            continue
+    raise ValueError(get_message_with_format("file_decode_error",
+                                             file_path=file_path,
+                                             encodings=", ".join(encodings)))
+
+
+
+def read_file_with_line_numbers(file_path: str,line_number_start:int=0) -> Generator[Tuple[int, str], None, None]:
+    """Read a file and return its content with line numbers.
+
+    Args:
+        file_path (str): Path to the file to read
+
+    Returns:
+        List[str]: A list of strings where each string is in the format "line_number:line_content"
+
+    Raises:
+        ValueError: If the file cannot be decoded with any of the tried encodings
+    """
+    encodings = ['utf-8', 'gbk', 'utf-16', 'latin-1']
+
+    for encoding in encodings:
+        try:
+            with open(file_path, 'r', encoding=encoding) as file:
+                for line_number, line in enumerate(file, start=line_number_start):
+                    yield (line_number, line)
+        except UnicodeDecodeError:
+            continue
+
+    raise ValueError(get_message_with_format("file_decode_error",
+                                             file_path=file_path,
+                                             encodings=", ".join(encodings)))
 
 
 def save_file(file_path: str, content: Union[str, List[str]]) -> None:
@@ -55,4 +94,4 @@ def save_file(file_path: str, content: Union[str, List[str]]) -> None:
     except IOError as e:
         raise IOError(get_message_with_format("file_write_error",
                                               file_path=file_path,
-                                              error=str(e)))
+                                              error=str(e)))
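
The new files.py helpers mirror the existing read_file(): each tries utf-8, gbk, utf-16, then latin-1 and raises the file_decode_error message if nothing decodes; read_file_with_line_numbers is declared as a generator of (line_number, line) tuples. A short usage sketch of the list/string helpers as the tools.py changes above consume them (the path is illustrative):

```python
from autocoder.common import files as files_utils

lines = files_utils.read_lines("src/example.py")     # list of lines; first decodable encoding wins
content = files_utils.read_file("src/example.py")    # whole file as one string
print("".join(lines[10:20]))                          # slicing pattern used by AutoCommandTools above
```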