auto-coder 0.1.243__py3-none-any.whl → 0.1.245__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of auto-coder might be problematic.

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: auto-coder
- Version: 0.1.243
+ Version: 0.1.245
  Summary: AutoCoder: AutoCoder
  Author: allwefantasy
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
  Requires-Dist: jupyter-client
  Requires-Dist: prompt-toolkit
  Requires-Dist: tokenizers
- Requires-Dist: byzerllm[saas] >=0.1.159
+ Requires-Dist: byzerllm[saas] >=0.1.161
  Requires-Dist: patch
  Requires-Dist: diff-match-patch
  Requires-Dist: GitPython
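
The only dependency change in the metadata is the byzerllm[saas] floor moving from 0.1.159 to 0.1.161. As a quick post-upgrade sanity check, a minimal sketch using only the standard library (package names as published on PyPI):

```python
from importlib.metadata import version

# Verify the installed versions against the bumped constraint (>=0.1.161).
print("auto-coder:", version("auto-coder"))
print("byzerllm:", version("byzerllm"))
```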
@@ -1,21 +1,22 @@
  autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autocoder/auto_coder.py,sha256=iQeyrg5L87IgBLFJLZLIJetHFeMuT2uslRlzo_Vjq9s,61829
+ autocoder/auto_coder.py,sha256=832zmnXmZ7RlqsPnuEj-2IuhsJI0QkCGHJICH6QRCAY,63333
  autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
  autocoder/auto_coder_rag.py,sha256=illKgzP2bv-Tq50ujsofJnOHdI4pzr0ALtfR8NHHWdQ,22351
  autocoder/auto_coder_rag_client_mcp.py,sha256=WV7j5JUiQge0x4-B7Hp5-pSAFXLbvLpzQMcCovbauIM,6276
  autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
  autocoder/auto_coder_server.py,sha256=XU9b4SBH7zjPPXaTWWHV4_zJm-XYa6njuLQaplYJH_c,20290
  autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
- autocoder/chat_auto_coder.py,sha256=s9uMjDQQawXEsq171GO7SVMO4fDjWd4xWT0KYO7nRp4,105295
- autocoder/chat_auto_coder_lang.py,sha256=V-VIieyKF5cwlK448B1V2LUbTdrU03tfgDrOk2aBvFk,14891
+ autocoder/chat_auto_coder.py,sha256=SLJQzXuQoj_2mcdbANG93ZM2-wIQsptA_h3VRv5xZAQ,105926
+ autocoder/chat_auto_coder_lang.py,sha256=gbpjfMd1wYiIrOlLDc-G7eI497mMwjM_ud9GvO-wo9k,15261
  autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
  autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
- autocoder/models.py,sha256=FlBrF6HhGao_RiCSgYhCmP7vs0KlG4hI_BI6dyZiL9s,5292
- autocoder/version.py,sha256=u0hWeuFclX3Z9nFe5oFsCdX854VeUHOs69Ggv1pvBvk,23
+ autocoder/models.py,sha256=7Z97Hzc_26dZG_wm6M2f9TL1ZxzzIN649U_Z0-m28EU,5342
+ autocoder/version.py,sha256=sncGdxYQvG5ZX4oQL4xUEZ96_LS3u1YIOVwACJUeMF4,23
  autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
  autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
  autocoder/agent/auto_guess_query.py,sha256=rDSdhpPHcOGE5MuDXvIrhCXAPR4ARS1LqpyoLsx2Jhw,11374
+ autocoder/agent/auto_review_commit.py,sha256=3_XgcOGNbpOnwq9WMj66rceZFEa4lEQvs9OZdKQi7co,7663
  autocoder/agent/auto_tool.py,sha256=DBzip-P_T6ZtT2eHexPcusmKYD0h7ufzp7TLwXAY10E,11554
  autocoder/agent/coder.py,sha256=x6bdJwDuETGg9ebQnYlUWCxCtQcDGg73LtI6McpWslQ,72034
  autocoder/agent/designer.py,sha256=EpRbzO58Xym3GrnppIT1Z8ZFAlnNfgzHbIzZ3PX-Yv8,27037
@@ -28,7 +29,7 @@ autocoder/common/__init__.py,sha256=2isE_u4VgfogwmcUCnFcussVFlzeNOLHDMFm5z_axbU,
  autocoder/common/anything2images.py,sha256=0ILBbWzY02M-CiWB-vzuomb_J1hVdxRcenAfIrAXq9M,25283
  autocoder/common/anything2img.py,sha256=4TREa-sOA-iargieUy7MpyCYVUE-9Mmq0wJtwomPqnE,7662
  autocoder/common/audio.py,sha256=Kn9nWKQddWnUrAz0a_ZUgjcu4VUU_IcZBigT7n3N3qc,7439
- autocoder/common/auto_coder_lang.py,sha256=9FBNhcl6Do4ICh-klevYsCTsDuy5kD99r8EE5Gs1QoM,12592
+ autocoder/common/auto_coder_lang.py,sha256=7sr3Dz43ASeWYLtMvkE6tMd8dWDPZBJOxNVQ8rC54Js,13963
  autocoder/common/buildin_tokenizer.py,sha256=L7d5t39ZFvUd6EoMPXUhYK1toD0FHlRH1jtjKRGokWU,1236
  autocoder/common/chunk_validation.py,sha256=BrR_ZWavW8IANuueEE7hS8NFAwEvm8TX34WnPx_1hs8,3030
  autocoder/common/cleaner.py,sha256=NU72i8C6o9m0vXExab7nao5bstBUsfJFcj11cXa9l4U,1089
@@ -46,7 +47,7 @@ autocoder/common/command_completer.py,sha256=SSeb8MDH0JPvfdyW-S2uaHnui4VBDfSQvQP
  autocoder/common/command_generator.py,sha256=v4LmU7sO-P7jEZIXCWHUC6P-vT7AvBi_x_PTwCqBAE8,1323
  autocoder/common/command_templates.py,sha256=mnB3n8i0yjH1mqzyClEg8Wpr9VbZV44kxky66Zu6OJY,8557
  autocoder/common/const.py,sha256=eTjhjh4Aj4CUzviJ81jaf3Y5cwqsLATySn2wJxaS6RQ,2911
- autocoder/common/files.py,sha256=uGpfKASYwIncK_Vt_e_FOjFlO5VyAQOnRJe2SFdSWrg,877
+ autocoder/common/files.py,sha256=CguxG9digkWBJpRaILErZmL_G5ryPRahPmPFWGB7X18,1973
  autocoder/common/git_utils.py,sha256=btK45sxvfm4tX3fBRNUPRZoGQuZuOEQrWSAwLy1yoLw,23095
  autocoder/common/image_to_page.py,sha256=O0cNO_vHHUP-fP4GXiVojShmNqkPnZXeIyiY1MRLpKg,13936
  autocoder/common/interpreter.py,sha256=62-dIakOunYB4yjmX8SHC0Gdy2h8NtxdgbpdqRZJ5vk,2833
@@ -77,14 +78,14 @@ autocoder/dispacher/actions/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
  autocoder/dispacher/actions/plugins/action_regex_project.py,sha256=ht_HWzZt84IEogoFMggnXI6aFFerrsuksVflAkcodfU,5545
  autocoder/dispacher/actions/plugins/action_translate.py,sha256=nVAtRSQpdGNmZxg1R_9zXG3AuTv3CHf2v7ODgj8u65c,7727
  autocoder/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autocoder/index/entry.py,sha256=KJaxqtaKgL27w8-j7OiAqI0anPpmrJSl7PkfeVF2ipE,11713
+ autocoder/index/entry.py,sha256=oFdSJW4ypc3_mhE3fk2O9UB34XMcq3a1Sp2MeTlfQ2o,11820
  autocoder/index/for_command.py,sha256=BFvljE4t6VaMBGboZAuhUCzVK0EitCy_n5D_7FEnihw,3204
- autocoder/index/index.py,sha256=8AcaELR1FS___7VlNyxPnJsDVQ4wjORbqXvcA6TifCE,20337
+ autocoder/index/index.py,sha256=VjfcBYHywU4tjQTA7mpHfzRM8nBPhPHrUnkuBbsj6do,20409
  autocoder/index/symbols_utils.py,sha256=CjcjUVajmJZB75Ty3a7kMv1BZphrm-tIBAdOJv6uo-0,2037
  autocoder/index/types.py,sha256=a2s_KV5FJlq7jqA2ELSo9E1sjuLwDB-JJYMhSpzBAhU,596
  autocoder/index/filter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autocoder/index/filter/normal_filter.py,sha256=pE5QwcBq6NYHFtYhwhfMJmYQYJwErNs-Q7iZmVBAh-k,7964
- autocoder/index/filter/quick_filter.py,sha256=Po32nJFAbAwb7kMaNkCrL5-ZjVE-Pobm5wzXyw9Y8iE,3882
+ autocoder/index/filter/normal_filter.py,sha256=V0MAUKgEG9vVTwZK5lMfpZCjU57S6cBioeHDjog0kLs,7992
+ autocoder/index/filter/quick_filter.py,sha256=Omvsz9O1xQEH4xP-wNuCZhxn69P7Y59SiLPUIDuGFiA,3851
  autocoder/pyproject/__init__.py,sha256=dQ2_7YZ7guybT9BhfxSGn43eLQJGQN2zgeKa6--JlaQ,14403
  autocoder/rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autocoder/rag/api_server.py,sha256=dRbhAZVRAOlZ64Cnxf4_rKb4iJwHnrWS9Zr67IVORw0,7288
@@ -119,7 +120,7 @@ autocoder/rag/stream_event/event_writer.py,sha256=l7kq_LnDDE8E5dZ-73C7J2MgzSL7Wr
  autocoder/rag/stream_event/types.py,sha256=rtLwOE8rShmi1dJdxyBpAV5ZjLBGG9vptMiSzMxGuIA,318
  autocoder/regex_project/__init__.py,sha256=EBZeCL5ORyD_9_5u_UuG4s7XtpXOu0y1sWDmxWFtufE,6781
  autocoder/regexproject/__init__.py,sha256=cEr-ZOaQjLD5sx7T7F2DhD5ips03HcJ02rded9EpSXc,9693
- autocoder/suffixproject/__init__.py,sha256=cmP54Y01ditZ83tiJqw5wle0I-uJBC0aZbZ7lYNSVO8,11080
+ autocoder/suffixproject/__init__.py,sha256=VcXjUbGf3uQrpoqVCItDvGG9DoeHJ_qEmghKwrVNw9w,11058
  autocoder/tsproject/__init__.py,sha256=yloVzkGLnbTd4Hcj9fMO-rcjNTTx4wI3Ga41LWOSYrY,11747
  autocoder/utils/__init__.py,sha256=KtcGElFNBgZPF7dEL8zF9JpXkCAjoyDrzaREJBhJrcs,994
  autocoder/utils/_markitdown.py,sha256=RU88qn4eZfYIy0GDrPxlI8oYXIypbi63VRJjdlnE0VU,47431
@@ -134,15 +135,15 @@ autocoder/utils/print_table.py,sha256=ZMRhCA9DD0FUfKyJBWd5bDdj1RrtPtgOMWSJwtvZcL
  autocoder/utils/queue_communicate.py,sha256=buyEzdvab1QA4i2QKbq35rG5v_9x9PWVLWWMTznWcYM,6832
  autocoder/utils/request_event_queue.py,sha256=r3lo5qGsB1dIjzVQ05dnr0z_9Z3zOkBdP1vmRciKdi4,2095
  autocoder/utils/request_queue.py,sha256=nwp6PMtgTCiuwJI24p8OLNZjUiprC-TsefQrhMI-yPE,3889
- autocoder/utils/rest.py,sha256=opE_kBEdNQdxh350M5lUTMk5TViRfpuKP_qWc0B1lks,8861
+ autocoder/utils/rest.py,sha256=hLBhr78y-WVnV0oQf9Rxc22EwqF78KINkScvYa1MuYA,6435
  autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
  autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=fcXusKEUKMu8WY9Y1_JL5aPkC-soKFxQcFAKThrNZoQ,13338
+ autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=8OV1VOdbj8O7JXDrBUhXVPmMkoCd9n-hvshtR2XXYxk,9112
  autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- auto_coder-0.1.243.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- auto_coder-0.1.243.dist-info/METADATA,sha256=zq_UlYzkagreMYmIrWVkCmf43Zr3_jIX1w577MlSXQE,2616
- auto_coder-0.1.243.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
- auto_coder-0.1.243.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
- auto_coder-0.1.243.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
- auto_coder-0.1.243.dist-info/RECORD,,
+ auto_coder-0.1.245.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ auto_coder-0.1.245.dist-info/METADATA,sha256=OtK6YNCP5q11TSBcEWYhfDeljzTN7OqNYW4ze_mggmk,2616
+ auto_coder-0.1.245.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+ auto_coder-0.1.245.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+ auto_coder-0.1.245.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+ auto_coder-0.1.245.dist-info/RECORD,,
autocoder/agent/auto_review_commit.py ADDED
@@ -0,0 +1,207 @@
+ from typing import Generator, List, Dict, Union, Tuple, Optional
+ import os
+ import yaml
+ import byzerllm
+ import pydantic
+ import git
+ from rich.console import Console
+ from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
+ from autocoder.common.printer import Printer
+ from autocoder.common import AutoCoderArgs
+ from autocoder.common.utils_code_auto_generate import stream_chat_with_continue
+
+
+ def load_yaml_config(yaml_file: str) -> Dict:
+     """Load a YAML config file."""
+     try:
+         with open(yaml_file, 'r', encoding='utf-8') as f:
+             return yaml.safe_load(f)
+     except Exception as e:
+         printer = Printer()
+         printer.print_in_terminal("yaml_load_error", style="red", yaml_file=yaml_file, error=str(e))
+         return {}
+
+
+ class AutoReviewCommit:
+     def __init__(self, llm: Union[byzerllm.ByzerLLM,byzerllm.SimpleByzerLLM],
+                  args:AutoCoderArgs,
+                  skip_diff: bool = False,
+                  console: Optional[Console] = None):
+         """
+         Initialize AutoReviewCommit
+
+         Args:
+             llm: the ByzerLLM instance used for code review
+             project_dir: the project root directory
+             skip_diff: whether to skip fetching diff information
+         """
+         self.project_dir = args.source_dir
+         self.actions_dir = os.path.join(args.source_dir, "actions")
+         self.llm = llm
+         self.skip_diff = skip_diff
+         self.console = console or Console()
+
+     @byzerllm.prompt()
+     def review(self, querie_with_urls_and_diffs: List[Tuple[str, List[str], str]], query: str) -> Generator[str,None,None]:
+         """
+         If documents were provided earlier in this conversation, review the submitted code changes against them and give improvement suggestions.
+
+         The information of the latest commit is included below:
+         <commit>
+         {% for query,urls,diff in querie_with_urls_and_diffs %}
+         ## Task requirement
+         {{ query }}
+
+         Modified files:
+         {% for url in urls %}
+         - {{ url }}
+         {% endfor %}
+
+         Code changes:
+         ```diff
+         {{ diff }}
+         ```
+         {% endfor %}
+         </commit>
+
+         Review requirements:
+         1. Code quality assessment
+            - Readability: are naming, comments, and code structure clear
+            - Style: does the code follow project conventions
+            - Implementation logic: are the chosen algorithms and data structures appropriate
+
+         2. Potential issue checks
+            - Common mistakes: null pointers, out-of-bounds access, type conversions, undeclared variables, typos, and other low-level errors
+            - Security: are there security risks
+            - Performance: are there performance problems
+            - Concurrency: are there concurrency-safety issues
+            - Exception handling: is error handling complete
+            - Resource management: is there a risk of resource leaks
+
+         3. Architectural soundness
+            - Modularity: are responsibilities divided sensibly
+            - Extensibility: is future extension convenient
+            - Dependencies: is component coupling reasonable
+            - Reusability: is there duplicated code
+
+         Return format:
+         Return markdown containing the following:
+         1. issues: a list of the concrete problems found
+         2. suggestions: a list of corresponding improvement suggestions
+         3. severity: the severity of each problem (low/medium/high)
+         4. affected_files: the list of affected files
+         5. summary: an overall assessment
+
+         {% if query %}
+         Additional review requirements from the user:
+         <user_review_requirement>
+         {{ query }}
+         </user_review_requirement>
+         {% endif %}
+
+         Notes:
+         1. Review comments should be specific and actionable, not generic
+         2. Every problem should come with a clear improvement suggestion
+         3. Severity judgments should consider the problem's potential impact on the system
+         4. Suggestions should match the project's tech stack and development conventions
+         """
+         pass
+
+
+     def parse_history_tasks(self) -> List[Dict]:
+         """
+         Parse historical task information.
+
+         Returns:
+             List[Dict]: each dict contains the information of one historical task
+         """
+         # Collect all YAML action files
+         action_files = [
+             f for f in os.listdir(self.actions_dir)
+             if f[:3].isdigit() and "_" in f and f.endswith('.yml')
+         ]
+
+         # Sort by sequence number
+         def get_seq(name):
+             return int(name.split("_")[0])
+
+         # Take the latest action file
+         action_files = sorted(action_files, key=get_seq)
+         action_files.reverse()
+
+         action_file = action_files[0]
+
+         querie_with_urls_and_diffs = []
+         repo = git.Repo(self.project_dir)
+
+         # Collect each query, its urls, and the corresponding commit diff
+         for yaml_file in [action_file]:
+             yaml_path = os.path.join(self.actions_dir, yaml_file)
+             config = load_yaml_config(yaml_path)
+
+             if not config:
+                 continue
+
+             query = config.get('query', '')
+             urls = config.get('urls', [])
+
+             if query and urls:
+                 commit_diff = ""
+                 if not self.skip_diff:
+                     # Compute the file's MD5 to match it to a commit
+                     import hashlib
+                     file_md5 = hashlib.md5(open(yaml_path, 'rb').read()).hexdigest()
+                     response_id = f"auto_coder_{yaml_file}_{file_md5}"
+                     # Look up the corresponding commit
+                     try:
+                         for commit in repo.iter_commits():
+                             if response_id in commit.message:
+                                 if commit.parents:
+                                     parent = commit.parents[0]
+                                     commit_diff = repo.git.diff(
+                                         parent.hexsha, commit.hexsha)
+                                 else:
+                                     commit_diff = repo.git.show(commit.hexsha)
+                                 break
+                     except git.exc.GitCommandError as e:
+                         printer = Printer()
+                         printer.print_in_terminal("git_command_error", style="red", error=str(e))
+                     except Exception as e:
+                         printer = Printer()
+                         printer.print_in_terminal("get_commit_diff_error", style="red", error=str(e))
+
+                 querie_with_urls_and_diffs.append((query, urls, commit_diff))
+
+         return querie_with_urls_and_diffs
+
+
+     def review_commit(self,query: str, conversations: List[Dict]) -> Generator[str,None,None]:
+         """
+         Review the latest code commit.
+
+         Returns:
+             Optional[ReviewResult]: the review result, or None if an error occurs
+         """
+         printer = Printer()
+         # Get the latest commit information
+         commits = self.parse_history_tasks()
+         if not commits:
+             printer.print_in_terminal("no_latest_commit", style="red")
+             return None
+
+         # Call the LLM to perform the code review
+         try:
+             # Render the prompt content
+             query = self.review.prompt(commits, query)
+             new_conversations = conversations.copy()[0:-1]
+             new_conversations.append({"role": "user", "content": query})
+             # Build the conversation messages and stream the response
+             v = stream_chat_with_continue(
+                 llm=self.llm,
+                 conversations=new_conversations,
+                 llm_config={}
+             )
+             return v
+         except Exception as e:
+             printer.print_in_terminal("code_review_error", style="red", error=str(e))
+             return None
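
Taken together with the dispatch hunk in autocoder/auto_coder.py below, the new agent is driven roughly as follows. This is a minimal sketch only, assuming an already-configured ByzerLLM/SimpleByzerLLM client, an AutoCoderArgs whose source_dir points at a git repository with an actions/ directory, and an OpenAI-style conversations list; the chunk format of the stream depends on stream_chat_with_continue:

```python
from typing import Dict, List, Union
import byzerllm
from autocoder.agent.auto_review_commit import AutoReviewCommit
from autocoder.common import AutoCoderArgs

def run_review(llm: Union[byzerllm.ByzerLLM, byzerllm.SimpleByzerLLM],
               args: AutoCoderArgs,
               conversations: List[Dict]) -> None:
    # parse_history_tasks expects args.source_dir/actions to contain numbered *.yml task files.
    reviewer = AutoReviewCommit(llm=llm, args=args)
    stream = reviewer.review_commit("focus on error handling", conversations)
    if stream is None:
        return
    # review_commit is annotated Generator[str, None, None], so iterate and print chunks.
    for chunk in stream:
        print(chunk, end="")
```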
autocoder/auto_coder.py CHANGED
@@ -278,8 +278,13 @@ def main(input_args: Optional[List[str]] = None):

      llm = byzerllm.ByzerLLM(verbose=args.print_request)

+     # code_model,index_filter_model,generate_rerank_model,chat_model
+     # If the user does not configure these four models, the default one is used.
+     # If the user fills in an arbitrary official DeepSeek key, this causes an Authentic(No User) error
+     # or errors like Insufficient Balance.
+
      code_model = byzerllm.ByzerLLM()
-     code_model.setup_default_model_name("deepseek_chat")
+     code_model.setup_default_model_name(args.model)
      llm.setup_sub_client("code_model", code_model)

      index_filter_model = byzerllm.ByzerLLM()
@@ -295,7 +300,8 @@ def main(input_args: Optional[List[str]] = None):
      llm.setup_sub_client("chat_model", chat_model)

      if args.product_mode == "lite":
-         llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_chat")
+         default_model = args.model
+         llm = byzerllm.SimpleByzerLLM(default_model_name=default_model)
          api_key_dir = os.path.expanduser("~/.auto-coder/keys")
          api_key_file = os.path.join(api_key_dir, "api.deepseek.com")

@@ -308,7 +314,7 @@ def main(input_args: Optional[List[str]] = None):
          llm.deploy(
              model_path="",
              pretrained_model_type="saas/openai",
-             udf_name="deepseek_chat",
+             udf_name=default_model,
              infer_params={
                  "saas.base_url": "https://api.deepseek.com/v1",
                  "saas.api_key": api_key,
@@ -317,11 +323,11 @@ def main(input_args: Optional[List[str]] = None):
              }
          )

-         code_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_chat")
+         code_llm = byzerllm.SimpleByzerLLM(default_model)
          code_llm.deploy(
              model_path="",
              pretrained_model_type="saas/openai",
-             udf_name="deepseek_chat",
+             udf_name=default_model,
              infer_params={
                  "saas.base_url": "https://api.deepseek.com/v1",
                  "saas.api_key": api_key,
@@ -368,7 +374,8 @@ def main(input_args: Optional[List[str]] = None):
                  "saas.is_reasoning": True
              }
          )
-
+
+         # If the user does not configure these four models, the default one is used.
          llm.setup_sub_client("code_model", code_llm)
          llm.setup_sub_client("chat_model", chat_llm)
          llm.setup_sub_client("generate_rerank_model", generate_rerank_llm)
@@ -692,6 +699,23 @@ def main(input_args: Optional[List[str]] = None):
      )
      llm.setup_sub_client("vl_model", vl_model)

+     if args.index_model:
+         model_name = args.index_model.strip()
+         model_info = models_module.get_model_by_name(model_name)
+         index_model = byzerllm.SimpleByzerLLM(default_model_name=model_name)
+         index_model.deploy(
+             model_path="",
+             pretrained_model_type="saas/openai",
+             udf_name=model_name,
+             infer_params={
+                 "saas.base_url": model_info["base_url"],
+                 "saas.api_key": model_info["api_key"],
+                 "saas.model": model_info["model_name"],
+                 "saas.is_reasoning": model_info["is_reasoning"]
+             }
+         )
+         llm.setup_sub_client("index_model", index_model)
+
      if args.sd_model:
          model_name = args.sd_model.strip()
          model_info = models_module.get_model_by_name(model_name)
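
The new args.index_model branch mirrors the existing sd_model and vl_model wiring: look the model up in the local model registry, deploy it as a SaaS/OpenAI-compatible endpoint, and register it as the index_model sub-client. A minimal sketch of that pattern, assuming models_module refers to autocoder.models and that the registry entry carries the base_url, api_key, model_name, and is_reasoning fields shown in the hunk above:

```python
import byzerllm
from autocoder import models as models_module  # assumed import path for models_module

def setup_index_model(llm: byzerllm.ByzerLLM, index_model_name: str) -> None:
    """Deploy an OpenAI-compatible SaaS model and attach it as the 'index_model' sub-client."""
    model_info = models_module.get_model_by_name(index_model_name.strip())
    index_model = byzerllm.SimpleByzerLLM(default_model_name=index_model_name)
    index_model.deploy(
        model_path="",
        pretrained_model_type="saas/openai",
        udf_name=index_model_name,
        infer_params={
            "saas.base_url": model_info["base_url"],
            "saas.api_key": model_info["api_key"],
            "saas.model": model_info["model_name"],
            "saas.is_reasoning": model_info["is_reasoning"],
        },
    )
    llm.setup_sub_client("index_model", index_model)
```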
@@ -1332,6 +1356,10 @@ def main(input_args: Optional[List[str]] = None):
              )
          )
          v = [[response.result,None]]
+     elif "review_commit" in args.action:
+         from autocoder.agent.auto_review_commit import AutoReviewCommit
+         reviewer = AutoReviewCommit(llm=chat_llm, args=args)
+         v = reviewer.review_commit(args.query)
      else:
          v = stream_chat_with_continue(
              llm=chat_llm,
autocoder/chat_auto_coder.py CHANGED
@@ -1605,8 +1605,8 @@ def code_next(query: str):
      if os.path.exists(temp_yaml):
          os.remove(temp_yaml)

-     llm = byzerllm.ByzerLLM.from_default_model(
-         args.inference_model or args.model)
+     product_mode = conf.get("product_mode", "lite")
+     llm = get_single_llm(args.chat_model or args.model, product_mode=product_mode)

      auto_guesser = AutoGuessQuery(
          llm=llm, project_dir=os.getcwd(), skip_diff=True)
@@ -1937,14 +1937,18 @@ def chat(query: str):
      if "/save" in query:
          yaml_config["action"].append("save")
          query = query.replace("/save", "", 1).strip()
-
-     is_review = query.strip().startswith("/review")
-     if is_review:
-         query = query.replace("/review", "", 1).strip()
-         if "prompt_review" in conf:
-             query = format_str_jinja2(conf["prompt_review"], query=query)
-         else:
-             query = code_review.prompt(query)
+
+     if "/review" in query and "/commit" in query:
+         yaml_config["action"].append("review_commit")
+         query = query.replace("/review", "", 1).replace("/commit", "", 1).strip()
+     else:
+         is_review = query.strip().startswith("/review")
+         if is_review:
+             query = query.replace("/review", "", 1).strip()
+             if "prompt_review" in conf:
+                 query = format_str_jinja2(conf["prompt_review"], query=query)
+             else:
+                 query = code_review.prompt(query)

      is_no_context = query.strip().startswith("/no_context")
      if is_no_context:
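
In the chat command parser, combining /review with /commit now routes the request to the new review_commit action instead of the plain prompt-based review. A minimal standalone sketch of the dispatch logic added above (the helper name parse_review_flags is hypothetical and exists only for illustration):

```python
from typing import Optional, Tuple

def parse_review_flags(query: str) -> Tuple[str, Optional[str]]:
    """Return (cleaned_query, action): 'review_commit' when both /review and /commit appear."""
    if "/review" in query and "/commit" in query:
        cleaned = query.replace("/review", "", 1).replace("/commit", "", 1).strip()
        return cleaned, "review_commit"
    if query.strip().startswith("/review"):
        return query.replace("/review", "", 1).strip(), "review"
    return query, None

# Example: "/review /commit focus on error handling" -> ("focus on error handling", "review_commit")
print(parse_review_flags("/review /commit focus on error handling"))
```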
@@ -2194,13 +2198,14 @@ def manage_models(params, query: str):
      table.add_column("Base URL", style="white", width=50, overflow="fold")
      for m in models_data:
          # Check if api_key_path exists and file exists
-         api_key_path = m.get("api_key_path", "")
-         name = m.get("name", "")
-         if api_key_path:
-             api_key_file = os.path.expanduser(f"~/.auto-coder/keys/{api_key_path}")
-             if os.path.exists(api_key_file):
-                 name = f"{name}*"
-
+         is_api_key_set = "api_key" in m
+         name = m.get("name", "")
+         if is_api_key_set:
+             api_key = m.get("api_key", "").strip()
+             if not api_key:
+                 printer.print_in_terminal("models_api_key_empty", style="yellow", name=name)
+             name = f"{name} *"
+
          table.add_row(
              name,
              m.get("model_name", ""),
@@ -2700,6 +2705,11 @@ def main():
      if "mode" not in memory:
          memory["mode"] = "normal"

+     # Handle leading whitespace in user_input
+     temp_user_input = user_input.lstrip()  # strip spaces on the left
+     if temp_user_input.startswith('/'):
+         user_input = temp_user_input
+
      if (
          memory["mode"] == "auto_detect"
          and user_input
autocoder/chat_auto_coder_lang.py CHANGED
@@ -1,4 +1,5 @@
  import locale
+ from byzerllm.utils import format_str_jinja2

  MESSAGES = {
      "en": {
@@ -110,7 +111,8 @@ MESSAGES = {
          "remove_files_all": "Removed all files.",
          "remove_files_removed": "Removed Files",
          "remove_files_none": "No files were removed.",
-         "files_removed": "Files Removed"
+         "files_removed": "Files Removed",
+         "models_api_key_empty": "Warning : {{name}} API key is empty. Please set a valid API key.",
      },
      "zh": {
          "mcp_remove_error": "移除 MCP 服务器时出错:{error}",
@@ -221,7 +223,8 @@ MESSAGES = {
          "remove_files_all": "已移除所有文件。",
          "remove_files_removed": "已移除的文件",
          "remove_files_none": "没有文件被移除。",
-         "files_removed": "移除的文件"
+         "files_removed": "移除的文件",
+         "models_api_key_empty": "警告: {{name}} API key 为空。请设置一个有效的 API key。",
      }
  }

@@ -236,3 +239,7 @@ def get_system_language():
  def get_message(key):
      lang = get_system_language()
      return MESSAGES.get(lang, MESSAGES['en']).get(key, MESSAGES['en'][key])
+
+
+ def get_message_with_format(msg_key: str, **kwargs):
+     return format_str_jinja2(get_message(msg_key), **kwargs)
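
Both language tables now expose a get_message_with_format helper that renders a message key through byzerllm's format_str_jinja2. A brief usage sketch, assuming the same Jinja2-style {{name}} placeholder shown in the hunk above (the model name is just an example value):

```python
from autocoder.chat_auto_coder_lang import get_message_with_format

# Renders the English warning on an English locale, or the zh variant
# when the detected system language is Chinese.
warning = get_message_with_format("models_api_key_empty", name="deepseek_chat")
print(warning)
```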
autocoder/common/auto_coder_lang.py CHANGED
@@ -1,9 +1,18 @@
  import locale
+ from byzerllm.utils import format_str_jinja2

  MESSAGES = {
-     "en": {
+     "en": {
+         "model_not_found": "Model {{model_name}} not found",
          "new_session_started": "New session started. Previous chat history has been archived.",
          "memory_save_success": "✅ Saved to your memory",
+         "file_decode_error": "Failed to decode file: {{file_path}}. Tried encodings: {{encodings}}",
+         "file_write_error": "Failed to write file: {{file_path}}. Error: {{error}}",
+         "yaml_load_error": "Error loading yaml file {{yaml_file}}: {{error}}",
+         "git_command_error": "Git command execution error: {{error}}",
+         "get_commit_diff_error": "Error getting commit diff: {{error}}",
+         "no_latest_commit": "Unable to get latest commit information",
+         "code_review_error": "Code review process error: {{error}}",
          "index_file_too_large": "⚠️ File {{ file_path }} is too large ({{ file_size }} > {{ max_length }}), splitting into chunks...",
          "index_update_success": "✅ Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s",
          "index_build_error": "❌ Error building index for {{ file_path }}: {{ error }}",
@@ -73,8 +82,16 @@ MESSAGES = {
          "git_init_required": "⚠️ auto_merge only applies to git repositories.\n\nPlease try using git init in the source directory:\n\n```shell\ncd {{ source_dir }}\ngit init.\n```\n\nThen run auto - coder again.\nError: {{ error }}"
      },
      "zh": {
+         "model_not_found": "未找到模型: {{model_name}}",
          "new_session_started": "新会话已开始。之前的聊天历史已存档。",
          "memory_save_success": "✅ 已保存到您的记忆中",
+         "file_decode_error": "无法解码文件: {{file_path}}。尝试的编码: {{encodings}}",
+         "file_write_error": "无法写入文件: {{file_path}}. 错误: {{error}}",
+         "yaml_load_error": "加载YAML文件出错 {{yaml_file}}: {{error}}",
+         "git_command_error": "Git命令执行错误: {{error}}",
+         "get_commit_diff_error": "获取commit diff时出错: {{error}}",
+         "no_latest_commit": "无法获取最新的提交信息",
+         "code_review_error": "代码审查过程出错: {{error}}",
          "index_file_too_large": "⚠️ 文件 {{ file_path }} 过大 ({{ file_size }} > {{ max_length }}), 正在分块处理...",
          "index_update_success": "✅ 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} 秒",
          "index_build_error": "❌ 构建 {{ file_path }} 索引时出错: {{ error }}",
@@ -125,7 +142,7 @@ MESSAGES = {
          "merge_success": "✅ 成功合并了 {{ num_files }} 个文件中的更改 {{ num_changes }}/{{ total_blocks }} 个代码块。",
          "no_changes_made": "⚠️ 未对任何文件进行更改。",
          "unmerged_blocks_title": "未合并代码块",
-         "unmerged_file_path": "文件: {file_path}",
+         "unmerged_file_path": "文件: {{file_path}}",
          "unmerged_search_block": "Search Block({{similarity}}):",
          "unmerged_replace_block": "Replace Block:",
          "unmerged_blocks_total": "未合并代码块数量: {{num_blocks}}",
@@ -156,3 +173,6 @@ def get_system_language():
  def get_message(key):
      lang = get_system_language()
      return MESSAGES.get(lang, MESSAGES['en']).get(key, MESSAGES['en'][key])
+
+
+ def get_message_with_format(msg_key: str, **kwargs):
+     return format_str_jinja2(get_message(msg_key), **kwargs)
autocoder/common/files.py CHANGED
@@ -1,3 +1,6 @@
+ from autocoder.common.auto_coder_lang import get_message_with_format
+ from typing import List, Dict, Union
+
  def read_file(file_path):
      """Read a file with automatic encoding detection.

@@ -23,4 +26,33 @@ def read_file(file_path):
          except UnicodeDecodeError:
              continue

-     raise ValueError(f"无法解码文件: {file_path}。尝试的编码: {', '.join(encodings)}")
+     raise ValueError(get_message_with_format("file_decode_error",
+                                              file_path=file_path,
+                                              encodings=", ".join(encodings)))
+
+
+
+ def save_file(file_path: str, content: Union[str, List[str]]) -> None:
+     """Save content to a file using UTF-8 encoding.
+
+     Args:
+         file_path (str): Path to the file to write
+         content (Union[str, List[str]]): Content to write to the file.
+             Can be a string or list of strings (will be joined with newlines)
+
+     Raises:
+         IOError: If the file cannot be written
+         TypeError: If content is neither str nor List[str]
+     """
+     try:
+         with open(file_path, 'w', encoding='utf-8') as f:
+             if isinstance(content, str):
+                 f.write(content)
+             elif isinstance(content, list):
+                 f.write('\n'.join(content))
+             else:
+                 raise TypeError("Content must be either str or List[str]")
+     except IOError as e:
+         raise IOError(get_message_with_format("file_write_error",
+                                               file_path=file_path,
+                                               error=str(e)))
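
The new save_file complements read_file so that encoding concerns stay in one module and failures surface through the localized messages added above. A brief usage sketch (the temporary file path is just an example):

```python
import os
import tempfile
from autocoder.common.files import read_file, save_file

# A list of strings is joined with newlines on write; read_file detects the encoding.
path = os.path.join(tempfile.gettempdir(), "auto_coder_files_demo.txt")
save_file(path, ["line one", "line two"])
print(read_file(path))
```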
autocoder/index/entry.py CHANGED
@@ -100,13 +100,13 @@ def build_index_and_filter_files(
              })
          )
      )
+
+     if not args.skip_filter_index and args.index_filter_model:
+         printer.print_in_terminal("quick_filter_start", style="blue")
+         quick_filter = QuickFilter(index_manager,stats,sources)
+         final_files = quick_filter.filter(index_manager.read_index(),args.query)

-     #MARK
-     printer.print_in_terminal("quick_filter_start", style="blue")
-     quick_filter = QuickFilter(index_manager,stats,sources)
-     final_files = quick_filter.filter(index_manager.read_index(),args.query)
-
-     if not final_files:
+     if not args.skip_filter_index and not args.index_filter_model:
          printer.print_in_terminal("normal_filter_start", style="blue")
          normal_filter = NormalFilter(index_manager,stats,sources)
          final_files = normal_filter.filter(index_manager.read_index(),args.query)
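
The effect of this hunk is that filter selection is now gated on both skip_filter_index and index_filter_model: the quick filter runs only when an index filter model is configured, and the normal filter runs otherwise, instead of the quick filter always running with the normal filter as a fallback when it returned nothing. A tiny illustrative helper (not part of the package) that captures the new decision:

```python
def choose_filter(skip_filter_index: bool, index_filter_model: str) -> str:
    """Which filtering path build_index_and_filter_files now takes (illustrative only)."""
    if skip_filter_index:
        return "none"
    return "quick_filter" if index_filter_model else "normal_filter"

print(choose_filter(False, "v3_chat"))  # hypothetical model name -> "quick_filter"
print(choose_filter(False, ""))         # no index_filter_model     -> "normal_filter"
```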
autocoder/index/filter/normal_filter.py CHANGED
@@ -34,6 +34,7 @@ class NormalFilter():
          self.sources = sources

      def filter(self, index_items: List[IndexItem], query: str) -> Dict[str, TargetFile]:
+
          final_files: Dict[str, TargetFile] = {}
          if not self.args.skip_filter_index:
              if self.args.request_id and not self.args.skip_events:
@@ -125,7 +126,7 @@ class NormalFilter():
                  if source.module_name == file.file_path:
                      file_content = source.source_code
                      try:
-                         result = self.index_manager.verify_file_relevance.with_llm(llm).with_return_type(VerifyFileRelevance).run(
+                         result = self.index_manager.verify_file_relevance.with_llm(self.index_manager.llm).with_return_type(VerifyFileRelevance).run(
                              file_content=file_content,
                              query=self.args.query
                          )