auto_coder-0.1.233-py3-none-any.whl → auto_coder-0.1.235-py3-none-any.whl

This diff shows the content of publicly available package versions as released to their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions.

Potentially problematic release: this version of auto-coder has been flagged as potentially problematic.

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.233
+Version: 0.1.235
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -39,7 +39,6 @@ Requires-Dist: real-agent
 Requires-Dist: python-docx
 Requires-Dist: docx2txt
 Requires-Dist: pdf2image
-Requires-Dist: Spire.Doc
 Requires-Dist: docx2pdf
 Requires-Dist: pyperclip
 Requires-Dist: colorama
@@ -1,17 +1,17 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/auto_coder.py,sha256=r2uHo-xPDbel6OhZwGzl8Iz1GP4d9KUWcDReEol9LOQ,59541
+autocoder/auto_coder.py,sha256=XOZGAJiy6CWnn7dfbGAmZwVI8KnIaSJ-FgZm8t4d0YE,61069
 autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
 autocoder/auto_coder_rag.py,sha256=illKgzP2bv-Tq50ujsofJnOHdI4pzr0ALtfR8NHHWdQ,22351
 autocoder/auto_coder_rag_client_mcp.py,sha256=WV7j5JUiQge0x4-B7Hp5-pSAFXLbvLpzQMcCovbauIM,6276
 autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
 autocoder/auto_coder_server.py,sha256=XU9b4SBH7zjPPXaTWWHV4_zJm-XYa6njuLQaplYJH_c,20290
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
-autocoder/chat_auto_coder.py,sha256=PzrbhpwTgJEMuPOT7vBd4uPi58mwqLc59l2fVV6rJc8,102049
+autocoder/chat_auto_coder.py,sha256=US-HXJtBkj_7QifJxQNcWgUTYe-ZB2sf0aZI8gbLN9w,102931
 autocoder/chat_auto_coder_lang.py,sha256=YJsFi8an0Kjbo9X7xKZfpdbHS3rbhrvChZNjWqEQ5Sw,11032
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
 autocoder/models.py,sha256=FlBrF6HhGao_RiCSgYhCmP7vs0KlG4hI_BI6dyZiL9s,5292
-autocoder/version.py,sha256=VphyhuzLuUXUhi7WwvwaGVGg2OSz77iY97Prah1F5g8,24
+autocoder/version.py,sha256=Ha3i0TzVJUOaC3CSX9IQT4bWoilQPZfSI4LPVJpLuuQ,23
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -24,10 +24,11 @@ autocoder/agent/project_reader.py,sha256=tWLaPoLw1gI6kO_NzivQj28KbobU2ceOLuppHMb
 autocoder/chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/common/JupyterClient.py,sha256=O-wi6pXeAEYhAY24kDa0BINrLYvKS6rKyWe98pDClS0,2816
 autocoder/common/ShellClient.py,sha256=fM1q8t_XMSbLBl2zkCNC2J9xuyKN3eXzGm6hHhqL2WY,2286
-autocoder/common/__init__.py,sha256=JDIZ_1JP7Nm74OL_aCFwvhiwiynukaE5oPCfh5GPQWU,11695
+autocoder/common/__init__.py,sha256=vpfo3RctksezDoraVSpHtfnxpspVNiYh8OmQhqQvcfE,11729
 autocoder/common/anything2images.py,sha256=0ILBbWzY02M-CiWB-vzuomb_J1hVdxRcenAfIrAXq9M,25283
 autocoder/common/anything2img.py,sha256=4TREa-sOA-iargieUy7MpyCYVUE-9Mmq0wJtwomPqnE,7662
 autocoder/common/audio.py,sha256=Kn9nWKQddWnUrAz0a_ZUgjcu4VUU_IcZBigT7n3N3qc,7439
+autocoder/common/auto_coder_lang.py,sha256=2xyc_qqClEiqtkwccH5gs3MquYwMM16DMq-kIx7Hwcc,5834
 autocoder/common/buildin_tokenizer.py,sha256=L7d5t39ZFvUd6EoMPXUhYK1toD0FHlRH1jtjKRGokWU,1236
 autocoder/common/chunk_validation.py,sha256=BrR_ZWavW8IANuueEE7hS8NFAwEvm8TX34WnPx_1hs8,3030
 autocoder/common/cleaner.py,sha256=NU72i8C6o9m0vXExab7nao5bstBUsfJFcj11cXa9l4U,1089
@@ -54,6 +55,7 @@ autocoder/common/mcp_hub.py,sha256=2ZyJv3Aiv4Y97UHut49oYhIFcu7ICR-mptDEBSgT3uE,1
 autocoder/common/mcp_server.py,sha256=QCFa-15kx7rbNsinwdGFFX2y47pww0fVdI-ldKFSSWI,12267
 autocoder/common/mcp_tools.py,sha256=KsLvRrB6pvmebqd-lDaSH6IBJR0AIxWRE-dtCEG_w9k,12485
 autocoder/common/memory_manager.py,sha256=2ZjYG7BPyvbYalZBF6AM_G5e10Qkw_zrqtD4Zd7GSsQ,3663
+autocoder/common/printer.py,sha256=dZQ5JXLy1OxA9uoLE7dNGuiOH0xbt5xN_fsMkEJ6syw,1304
 autocoder/common/recall_validation.py,sha256=Avt9Q9dX3kG6Pf2zsdlOHmsjd-OeSj7U1PFBDp_Cve0,1700
 autocoder/common/screenshots.py,sha256=_gA-z1HxGjPShBrtgkdideq58MG6rqFB2qMUJKjrycs,3769
 autocoder/common/search.py,sha256=245iPFgWhMldoUK3CqCP89ltaxZiNPK73evoG6Fp1h8,16518
@@ -69,20 +71,20 @@ autocoder/db/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/db/store.py,sha256=tFT66bP2ZKIqZip-uhLkHRSLaaOAUUDZfozJwcqix3c,1908
 autocoder/dispacher/__init__.py,sha256=YoA64dIxnx4jcE1pwSfg81sjkQtjDkhddkfac1-cMWo,1230
 autocoder/dispacher/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/dispacher/actions/action.py,sha256=NjJGLek8H0FlIOreBnl2KEC-jJ5Jq-V8D1RuI6ifUjc,19299
+autocoder/dispacher/actions/action.py,sha256=KUTpbkIQaIOhdnAaGbUj2ltj12IbzssxqEVoT9YM11U,19972
 autocoder/dispacher/actions/copilot.py,sha256=iMh4ckj9hO5Q-iemF3CStXd7DatWai7Eci5zOlKxK9c,13072
 autocoder/dispacher/actions/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/dispacher/actions/plugins/action_regex_project.py,sha256=-ly-NRgQ8LfDQDoH0QmD_LP-G932Kt08WXy9oIvLy10,5325
+autocoder/dispacher/actions/plugins/action_regex_project.py,sha256=ht_HWzZt84IEogoFMggnXI6aFFerrsuksVflAkcodfU,5545
 autocoder/dispacher/actions/plugins/action_translate.py,sha256=nVAtRSQpdGNmZxg1R_9zXG3AuTv3CHf2v7ODgj8u65c,7727
 autocoder/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/index/entry.py,sha256=1T41clV3GXwiRbowubQ1iZM5k3_2ECS-DtcnbA9QxAk,10081
+autocoder/index/entry.py,sha256=f6_lrJLAfYslzAUY7JUCsJMhoMCIKy9ZHGBWHhmCtr8,11310
 autocoder/index/for_command.py,sha256=LGnz-OWogT8rd24m4Zcan7doLaijxqorAuiMk7WuRq0,3125
-autocoder/index/index.py,sha256=lwaobSHvOnzhTMf8SQXzw3nIJQUS4lyo6nLdtv0Ebc0,19223
+autocoder/index/index.py,sha256=HfWN0Mbk22S81-fnm9wrYNJO9Bcnws4G_PxJ8IQuH5A,20175
 autocoder/index/symbols_utils.py,sha256=CjcjUVajmJZB75Ty3a7kMv1BZphrm-tIBAdOJv6uo-0,2037
 autocoder/index/types.py,sha256=a2s_KV5FJlq7jqA2ELSo9E1sjuLwDB-JJYMhSpzBAhU,596
 autocoder/index/filter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/index/filter/normal_filter.py,sha256=pE5QwcBq6NYHFtYhwhfMJmYQYJwErNs-Q7iZmVBAh-k,7964
-autocoder/index/filter/quick_filter.py,sha256=BxOiZOlK2v6EnX0yV28R3ielXboTmrMvVwielCrqKpE,3678
+autocoder/index/filter/quick_filter.py,sha256=1cRPAd8uUFkbPg4JlLaYCd3a8CMifVYEIpqBBaJ9GXo,3874
 autocoder/pyproject/__init__.py,sha256=dQ2_7YZ7guybT9BhfxSGn43eLQJGQN2zgeKa6--JlaQ,14403
 autocoder/rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/rag/api_server.py,sha256=dRbhAZVRAOlZ64Cnxf4_rKb4iJwHnrWS9Zr67IVORw0,7288
@@ -137,9 +139,9 @@ autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=6D_SIa5hHSwIHC1poO_ztK7IVugAqNHu-jQySd7EnfQ,4181
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.233.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-auto_coder-0.1.233.dist-info/METADATA,sha256=w___EO9VOCh1T58HsPM3Qi5OYm2ktUVVD6I5KucBO3w,2641
-auto_coder-0.1.233.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-auto_coder-0.1.233.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
-auto_coder-0.1.233.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
-auto_coder-0.1.233.dist-info/RECORD,,
+auto_coder-0.1.235.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.235.dist-info/METADATA,sha256=JiMGcJmUn-bzkT0jg8tVMxu-D04DnaXV4On1qBk3iDc,2616
+auto_coder-0.1.235.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.235.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.235.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.235.dist-info/RECORD,,
autocoder/auto_coder.py CHANGED
@@ -39,7 +39,7 @@ from rich.console import Console
 from rich.panel import Panel
 from rich.markdown import Markdown
 from rich.live import Live
-from autocoder.auto_coder_lang import get_message
+from autocoder.common.auto_coder_lang import get_message
 from autocoder.common.memory_manager import save_to_memory_file
 from autocoder import models as models_module
 from autocoder.common.utils_code_auto_generate import stream_chat_with_continue
@@ -69,8 +69,7 @@ def load_include_files(config, base_path, max_depth=10, current_depth=0):
         include_files = [include_files]
 
     for include_file in include_files:
-        abs_include_path = resolve_include_path(base_path, include_file)
-        logger.info(f"Loading include file: {abs_include_path}")
+        abs_include_path = resolve_include_path(base_path, include_file)
         with open(abs_include_path, "r") as f:
             include_config = yaml.safe_load(f)
             if not include_config:
@@ -131,15 +130,15 @@ def main(input_args: Optional[List[str]] = None):
     if not os.path.isabs(args.source_dir):
         args.source_dir = os.path.abspath(args.source_dir)
 
-    if not args.silence:
-        print("Command Line Arguments:")
-        print("-" * 50)
-        for arg, value in vars(args).items():
-            if arg == "context" and value:
-                print(f"{arg:20}: {value[:30]}...")
-            else:
-                print(f"{arg:20}: {value}")
-        print("-" * 50)
+    # if not args.silence:
+    #     print("Command Line Arguments:")
+    #     print("-" * 50)
+    #     for arg, value in vars(args).items():
+    #         if arg == "context" and value:
+    #             print(f"{arg:20}: {value[:30]}...")
+    #         else:
+    #             print(f"{arg:20}: {value}")
+    #     print("-" * 50)
 
     # init store
     store = Store(os.path.join(args.source_dir, ".auto-coder", "metadata.db"))
@@ -244,10 +243,7 @@ def main(input_args: Optional[List[str]] = None):
         new_file = os.path.join(
             actions_dir, f"{new_seq}_{raw_args.name}.yml")
         with open(new_file, "w") as f:
-            f.write(content)
-
-        print(f"Successfully created new action file: {new_file}")
-
+            f.write(content)
         # open_yaml_file_in_editor(new_file)
         return
 
@@ -281,6 +277,22 @@ def main(input_args: Optional[List[str]] = None):
         byzerllm.connect_cluster(address=args.ray_address)
 
         llm = byzerllm.ByzerLLM(verbose=args.print_request)
+
+        code_model = byzerllm.ByzerLLM()
+        code_model.setup_default_model_name("deepseek_chat")
+        llm.setup_sub_client("code_model", code_model)
+
+        index_filter_model = byzerllm.ByzerLLM()
+        index_filter_model.setup_default_model_name("deepseek_r1_chat")
+        llm.setup_sub_client("index_filter_model", index_filter_model)
+
+        generate_rerank_model = byzerllm.ByzerLLM()
+        generate_rerank_model.setup_default_model_name("deepseek_r1_chat")
+        llm.setup_sub_client("generate_rerank_model", generate_rerank_model)
+
+        chat_model = byzerllm.ByzerLLM()
+        chat_model.setup_default_model_name("deepseek_r1_chat")
+        llm.setup_sub_client("chat_model", chat_model)
 
     if args.product_mode == "lite":
         llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_chat")
@@ -344,9 +356,23 @@ def main(input_args: Optional[List[str]] = None):
                 }
             )
 
+            index_filter_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
+            index_filter_llm.deploy(
+                model_path="",
+                pretrained_model_type="saas/openai",
+                udf_name="deepseek_r1_chat",
+                infer_params={
+                    "saas.base_url": "https://api.deepseek.com/v1",
+                    "saas.api_key": api_key,
+                    "saas.model": "deepseek-reasoner",
+                    "saas.is_reasoning": True
+                }
+            )
+
             llm.setup_sub_client("code_model", code_llm)
             llm.setup_sub_client("chat_model", chat_llm)
             llm.setup_sub_client("generate_rerank_model", generate_rerank_llm)
+            llm.setup_sub_client("index_filter_model", index_filter_llm)
 
     if args.product_mode == "lite":
         # Set up default models based on configuration
@@ -444,7 +470,7 @@ def main(input_args: Optional[List[str]] = None):
                     "saas.is_reasoning": model_info["is_reasoning"]
                 }
             )
-            llm.setup_sub_client("inference_model", inference_model)
+            llm.setup_sub_client("inference_model", inference_model)
 
         if args.index_filter_model:
             model_info = models_module.get_model_by_name(args.index_filter_model)
@@ -1114,20 +1140,25 @@ def main(input_args: Optional[List[str]] = None):
             chat_llm = llm
 
         source_count = 0
-        pre_conversations = []
-        if args.context:
-            context = json.loads(args.context)
-            if "file_content" in context:
-                file_content = context["file_content"]
-                pre_conversations.append(
-                    {
-                        "role": "user",
-                        "content": f"请阅读下面的代码和文档:\n\n <files>\n{file_content}\n</files>",
-                    },
-                )
-                pre_conversations.append(
-                    {"role": "assistant", "content": "read"})
-                source_count += 1
+        pre_conversations = []
+        context_content = args.context if args.context else ""
+        if args.context:
+            try:
+                context = json.loads(args.context)
+                if "file_content" in context:
+                    context_content = context["file_content"]
+            except:
+                pass
+
+        pre_conversations.append(
+            {
+                "role": "user",
+                "content": f"请阅读下面的代码和文档:\n\n <files>\n{context_content}\n</files>",
+            },
+        )
+        pre_conversations.append(
+            {"role": "assistant", "content": "read"})
+        source_count += 1
 
         from autocoder.index.index import IndexManager
         from autocoder.index.entry import build_index_and_filter_files
@@ -1142,10 +1173,11 @@ def main(input_args: Optional[List[str]] = None):
         else:
             pp = SuffixProject(args=args, llm=llm, file_filter=None)
             pp.run()
-            sources = pp.sources
+            sources = pp.sources
 
         s = build_index_and_filter_files(
-            llm=llm, args=args, sources=sources)
+            llm=llm, args=args, sources=sources)
+
         if s:
            pre_conversations.append(
                {
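The hunks above register dedicated sub-clients (code_model, index_filter_model, generate_rerank_model, chat_model) on the main LLM object. A minimal sketch of the pattern, using only the setup_sub_client/get_sub_client calls that appear elsewhere in this diff; the fallback shown is illustrative, not the package's exact control flow:

```python
import byzerllm

# Register a per-task sub-client on the main client (pattern from the diff above).
llm = byzerllm.ByzerLLM()
index_filter_model = byzerllm.ByzerLLM()
index_filter_model.setup_default_model_name("deepseek_r1_chat")
llm.setup_sub_client("index_filter_model", index_filter_model)

# Consumers look the sub-client up by name and fall back to the main client
# when it is absent, as quick_filter.py does further down in this diff.
model = llm.get_sub_client("index_filter_model") or llm
```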
autocoder/chat_auto_coder.py CHANGED
@@ -370,6 +370,33 @@ def initialize_system(args):
         "deepseek_chat",
     ]
 
+    try:
+        subprocess.run(deploy_cmd, check=True)
+        print_status(get_message("deploy_complete"), "success")
+    except subprocess.CalledProcessError:
+        print_status(get_message("deploy_fail"), "error")
+        return
+
+
+    deploy_cmd = [
+        "byzerllm",
+        "deploy",
+        "--pretrained_model_type",
+        "saas/reasoning_openai",
+        "--cpus_per_worker",
+        "0.001",
+        "--gpus_per_worker",
+        "0",
+        "--worker_concurrency",
+        "1000",
+        "--num_workers",
+        "1",
+        "--infer_params",
+        f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-reasoner",
+        "--model",
+        "deepseek_r1_chat",
+    ]
+
     try:
         subprocess.run(deploy_cmd, check=True)
         print_status(get_message("deploy_complete"), "success")
@@ -1715,7 +1742,8 @@ def coding(query: str):
         == "true",
     }
 
-    yaml_config["context"] = ""
+    yaml_config["context"] = ""
+    yaml_config["in_code_apply"] = is_apply
 
     for key, value in conf.items():
         converted_value = convert_config_value(key, value)
autocoder/common/__init__.py CHANGED
@@ -355,5 +355,8 @@ class AutoCoderArgs(pydantic.BaseModel):
     enable_global_memory: Optional[bool] = True
     product_mode: Optional[str] = "lite"
 
+    in_code_apply: bool = False
+
     class Config:
         protected_namespaces = ()
+
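in_code_apply is an ordinary pydantic field with a default, so the coding() path above can switch it per request through the YAML config (yaml_config["in_code_apply"] = is_apply). A hypothetical sketch, assuming the model's remaining fields all carry defaults:

```python
from autocoder.common import AutoCoderArgs

# Hypothetical: construct args and flip the new flag for an apply-style run.
args = AutoCoderArgs()        # assumes the other fields default sensibly
args.in_code_apply = True
```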
autocoder/common/auto_coder_lang.py ADDED
@@ -0,0 +1,90 @@
+import locale
+
+MESSAGES = {
+    "en": {
+        "index_file_too_large": "⚠️ File {{ file_path }} is too large ({{ file_size }} > {{ max_length }}), splitting into chunks...",
+        "index_update_success": "✅ Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s",
+        "index_build_error": "❌ Error building index for {{ file_path }}: {{ error }}",
+        "index_build_summary": "📊 Total Files: {{ total_files }}, Need to Build Index: {{ num_files }}",
+        "building_index_progress": "⏳ Building Index: {{ counter }}/{{ num_files }}...",
+        "index_source_dir_mismatch": "⚠️ Source directory mismatch (file_path: {{ file_path }}, source_dir: {{ source_dir }})",
+        "index_related_files_fail": "⚠️ Failed to find related files for chunk {{ chunk_count }}",
+        "index_threads_completed": "✅ Completed {{ completed_threads }}/{{ total_threads }} threads",
+        "index_related_files_fail": "⚠️ Failed to find related files for chunk {{ chunk_count }}",
+        "human_as_model_instructions": (
+            "You are now in Human as Model mode. The content has been copied to your clipboard.\n"
+            "The system is waiting for your input. When finished, enter 'EOF' on a new line to submit.\n"
+            "Use '/break' to exit this mode. If you have issues with copy-paste, use '/clear' to clean and paste again."
+        ),
+        "clipboard_not_supported": (
+            "pyperclip not installed or clipboard is not supported, instruction will not be copied to clipboard."
+        ),
+        "human_as_model_instructions_no_clipboard": (
+            "You are now in Human as Model mode. [bold red]The content could not be copied to your clipboard.[/bold red]\n"
+            "but you can copy prompt from output.txt file.\n"
+            "The system is waiting for your input. When finished, enter 'EOF' on a new line to submit.\n"
+            "Use '/break' to exit this mode. If you have issues with copy-paste, use '/clear' to clean and paste again."
+        ),
+        "phase1_processing_sources": "Phase 1: Processing REST/RAG/Search sources...",
+        "phase2_building_index": "Phase 2: Building index for all files...",
+        "phase6_file_selection": "Phase 6: Processing file selection and limits...",
+        "phase7_preparing_output": "Phase 7: Preparing final output...",
+        "chat_human_as_model_instructions": (
+            "Chat is now in Human as Model mode.\n"
+            "The question has been copied to your clipboard.\n"
+            "Please use Web version model to get the answer.\n"
+            "Or use /conf human_as_model:false to close this mode and get the answer in terminal directly."
+            "Paste the answer to the input box below, use '/break' to exit, '/clear' to clear the screen, '/eof' to submit."
+        )
+    },
+    "zh": {
+        "index_file_too_large": "⚠️ 文件 {{ file_path }} 过大 ({{ file_size }} > {{ max_length }}), 正在分块处理...",
+        "index_update_success": "✅ 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} 秒",
+        "index_build_error": "❌ 构建 {{ file_path }} 索引时出错: {{ error }}",
+        "index_build_summary": "📊 总文件数: {{ total_files }}, 需要构建索引: {{ num_files }}",
+        "building_index_progress": "⏳ 正在构建索引: {{ counter }}/{{ num_files }}...",
+        "index_source_dir_mismatch": "⚠️ 源目录不匹配 (文件路径: {{ file_path }}, 源目录: {{ source_dir }})",
+        "index_related_files_fail": "⚠️ 无法为块 {{ chunk_count }} 找到相关文件",
+        "index_threads_completed": "✅ 已完成 {{ completed_threads }}/{{ total_threads }} 个线程",
+        "index_related_files_fail": "⚠️ 无法为块 {{ chunk_count }} 找到相关文件",
+        "human_as_model_instructions": (
+            "您现在处于人类作为模型模式。内容已复制到您的剪贴板。\n"
+            "系统正在等待您的输入。完成后,在新行输入'EOF'提交。\n"
+            "使用'/break'退出此模式。如果复制粘贴有问题,使用'/clear'清理并重新粘贴。"
+        ),
+        "clipboard_not_supported": (
+            "未安装pyperclip或不支持剪贴板,指令将不会被复制到剪贴板。"
+        ),
+        "human_as_model_instructions_no_clipboard": (
+            "您现在处于人类作为模型模式。[bold red]内容无法复制到您的剪贴板。[/bold red]\n"
+            "但您可以从output.txt文件复制提示。\n"
+            "系统正在等待您的输入。完成后,在新行输入'EOF'提交。\n"
+            "使用'/break'退出此模式。如果复制粘贴有问题,使用'/clear'清理并重新粘贴。"
+        ),
+
+        "phase1_processing_sources": "阶段 1: 正在处理 REST/RAG/Search 源...",
+        "phase2_building_index": "阶段 2: 正在为所有文件构建索引...",
+        "phase6_file_selection": "阶段 6: 正在处理文件选择和限制...",
+        "phase7_preparing_output": "阶段 7: 正在准备最终输出...",
+
+        "chat_human_as_model_instructions": (
+            "\n============= Chat 处于 Human as Model 模式 =============\n"
+            "问题已复制到剪贴板\n"
+            "请使用Web版本模型获取答案\n"
+            "或者使用 /conf human_as_model:false 关闭该模式直接在终端获得答案。"
+            "将获得答案黏贴到下面的输入框,换行后,使用 '/break' 退出,'/clear' 清屏,'/eof' 提交。"
+        ),
+    }
+}
+
+
+def get_system_language():
+    try:
+        return locale.getdefaultlocale()[0][:2]
+    except:
+        return 'en'
+
+
+def get_message(key):
+    lang = get_system_language()
+    return MESSAGES.get(lang, MESSAGES['en']).get(key, MESSAGES['en'][key])
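The templates above use Jinja2-style {{ placeholder }} markers rather than str.format. Note also that "index_related_files_fail" appears twice in each locale; the second literal silently overwrites the first in a Python dict, so the duplication is harmless but redundant. A usage sketch; format_str_jinja2 is the byzerllm helper that printer.py (next section) imports for exactly this purpose:

```python
from autocoder.common.auto_coder_lang import get_message
from byzerllm.utils import format_str_jinja2

# Look up a message in the system language ("en" fallback) and render it.
template = get_message("index_build_summary")
print(format_str_jinja2(template, total_files=120, num_files=7))
# e.g. "📊 Total Files: 120, Need to Build Index: 7"
```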
autocoder/common/printer.py ADDED
@@ -0,0 +1,36 @@
+from rich.console import Console
+from typing import Optional
+from byzerllm.utils import format_str_jinja2
+from autocoder.common.auto_coder_lang import get_message
+from autocoder.chat_auto_coder_lang import get_message as get_chat_message
+class Printer:
+    def __init__(self,console:Optional[Console]=None):
+        if console is None:
+            self.console = Console()
+        else:
+            self.console = console
+
+    def get_message_from_key(self, key: str):
+        try:
+            return get_message(key)
+        except Exception as e:
+            return get_chat_message(key)
+
+    def print_in_terminal(self, key: str, style: str = None,**kwargs):
+        try:
+            if style:
+                self.console.print(format_str_jinja2(self.get_message_from_key(key),**kwargs), style=style)
+            else:
+                self.console.print(format_str_jinja2(self.get_message_from_key(key),**kwargs))
+        except Exception as e:
+            print(self.get_message_from_key(key))
+
+
+    def print_str_in_terminal(self, content: str, style: str = None):
+        try:
+            if style:
+                self.console.print(content, style=style)
+            else:
+                self.console.print(content)
+        except Exception as e:
+            print(content)
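Printer resolves a key first against the new common/auto_coder_lang table, then falls back to chat_auto_coder_lang, and degrades to plain print() if Rich fails. A sketch of how the rest of this diff drives it (keys and kwargs taken from the hunks below):

```python
from autocoder.common.printer import Printer

printer = Printer()
printer.print_in_terminal("phase1_processing_sources")            # keyed message
printer.print_in_terminal("index_build_summary", style="bold blue",
                          total_files=120, num_files=7)           # key + template kwargs
printer.print_str_in_terminal("raw text, not a message key", style="green")
```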
autocoder/dispacher/actions/action.py CHANGED
@@ -54,9 +54,14 @@ class ActionTSProject(BaseAction):
 
         source_code = pp.output()
         if self.llm:
+            if args.in_code_apply:
+                old_query = args.query
+                args.query = (args.context or "") + "\n\n" + args.query
             source_code = build_index_and_filter_files(
                 llm=self.llm, args=args, sources=pp.sources
             )
+            if args.in_code_apply:
+                args.query = old_query
 
         if args.image_file:
             if args.image_mode == "iterative":
@@ -256,11 +261,17 @@ class ActionPyProject(BaseAction):
         pp = PyProject(args=self.args, llm=self.llm)
         self.pp = pp
         pp.run(packages=args.py_packages.split(",") if args.py_packages else [])
-        source_code = pp.output()
+        source_code = pp.output()
+
         if self.llm:
+            old_query = args.query
+            if args.in_code_apply:
+                args.query = (args.context or "") + "\n\n" + args.query
             source_code = build_index_and_filter_files(
                 llm=self.llm, args=args, sources=pp.sources
             )
+            if args.in_code_apply:
+                args.query = old_query
 
         self.process_content(source_code)
         return True
@@ -355,9 +366,14 @@ class ActionSuffixProject(BaseAction):
         pp.run()
         source_code = pp.output()
         if self.llm:
+            if args.in_code_apply:
+                old_query = args.query
+                args.query = (args.context or "") + "\n\n" + args.query
             source_code = build_index_and_filter_files(
                 llm=self.llm, args=args, sources=pp.sources
             )
+            if args.in_code_apply:
+                args.query = old_query
         self.process_content(source_code)
 
     def process_content(self, content: str):
autocoder/dispacher/actions/plugins/action_regex_project.py CHANGED
@@ -36,9 +36,14 @@ class ActionRegexProject:
         pp.run()
         source_code = pp.output()
         if self.llm:
+            if args.in_code_apply:
+                old_query = args.query
+                args.query = (args.context or "") + "\n\n" + args.query
             source_code = build_index_and_filter_files(
                 llm=self.llm, args=args, sources=pp.sources
             )
+            if args.in_code_apply:
+                args.query = old_query
         self.process_content(source_code)
 
     def process_content(self, content: str):
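The same save/augment/restore sequence around build_index_and_filter_files now appears in four actions (ActionTSProject, ActionPyProject, ActionSuffixProject, ActionRegexProject). A context manager would express it once; the following is a sketch of that refactoring, not code from the package:

```python
from contextlib import contextmanager

@contextmanager
def in_code_apply_query(args):
    """Temporarily prepend args.context to args.query when in_code_apply is set."""
    old_query = args.query
    if args.in_code_apply:
        args.query = (args.context or "") + "\n\n" + args.query
    try:
        yield args
    finally:
        args.query = old_query  # always restore the original query
```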
autocoder/index/entry.py CHANGED
@@ -9,7 +9,7 @@ from rich.console import Console
 from rich.table import Table
 from rich.panel import Panel
 
-from loguru import logger
+from autocoder.common.printer import Printer
 from autocoder.utils.queue_communicate import (
     queue_communicate,
     CommunicateEvent,
@@ -58,7 +58,8 @@ def build_index_and_filter_files(
     final_files: Dict[str, TargetFile] = {}
 
     # Phase 1: Process REST/RAG/Search sources
-    logger.info("Phase 1: Processing REST/RAG/Search sources...")
+    printer = Printer()
+    printer.print_in_terminal("phase1_processing_sources")
     phase_start = time.monotonic()
     for source in sources:
         if source.tag in ["REST", "RAG", "SEARCH"]:
@@ -79,7 +80,7 @@ def build_index_and_filter_files(
             )
         )
 
-    logger.info("Phase 2: Building index for all files...")
+    printer.print_in_terminal("phase2_building_index")
     phase_start = time.monotonic()
     index_manager = IndexManager(llm=llm, sources=sources, args=args)
     index_data = index_manager.build_index()
@@ -133,19 +134,48 @@ def build_index_and_filter_files(
 
         return [file for file in result] if result else []
 
+    def shorten_path(path: str, keep_levels: int = 3) -> str:
+        """
+        Shorten long paths for display, keeping the last few levels.
+        Example: /a/b/c/d/e/f.py -> .../c/d/e/f.py
+        """
+        parts = path.split(os.sep)
+        if len(parts) > keep_levels:
+            return ".../" + os.sep.join(parts[-keep_levels:])
+        return path
+
     def print_selected(data):
         console = Console()
+
+        # Get the terminal width
+        console_width = console.width
 
         table = Table(
             title="Files Used as Context",
             show_header=True,
             header_style="bold magenta",
+            # Cap the table width at the terminal width, with a 10-character margin
+            width=min(console_width - 10, 120),
+            expand=True
         )
-        table.add_column("File Path", style="cyan", no_wrap=True)
-        table.add_column("Reason", style="green")
+
+        # Tuned column configuration
+        table.add_column("File Path",
+                         style="cyan",
+                         width=int((console_width - 10) * 0.6),  # 60% of the width for the file path
+                         overflow="fold",  # fold overly long paths
+                         no_wrap=False)  # allow wrapping
+
+        table.add_column("Reason",
+                         style="green",
+                         width=int((console_width - 10) * 0.4),  # 40% of the width for the reason
+                         no_wrap=False)
 
+        # Add the processed file paths
         for file, reason in data:
-            table.add_row(file, reason)
+            # Shorten each path, keeping the last 3 levels
+            processed_path = shorten_path(file, keep_levels=3)
+            table.add_row(processed_path, reason)
 
         panel = Panel(
             table,
@@ -157,7 +187,7 @@ def build_index_and_filter_files(
         console.print(panel)
 
     # Phase 6: File selection and limitation
-    logger.info("Phase 6: Processing file selection and limits...")
+    printer.print_in_terminal("phase6_file_selection")
     phase_start = time.monotonic()
 
     if args.index_filter_file_num > 0:
@@ -188,7 +218,7 @@ def build_index_and_filter_files(
     stats["timings"]["file_selection"] = phase_end - phase_start
 
     # Phase 7: Display results and prepare output
-    logger.info("Phase 7: Preparing final output...")
+    printer.print_in_terminal("phase7_preparing_output")
     phase_start = time.monotonic()
     try:
         print_selected(
@@ -269,7 +299,7 @@ def build_index_and_filter_files(
     • Total time: {total_time:.2f}s
     ====================================
     """
-    logger.info(summary)
+    printer.print_str_in_terminal(summary)
 
     if args.request_id and not args.skip_events:
         queue_communicate.send_event(
autocoder/index/filter/quick_filter.py CHANGED
@@ -55,7 +55,10 @@ class QuickFilter():
         }
         ```
 
-        特别注意,如果用户的query里 @文件 或者 @@符号,那么被@的文件或者@@的符号必须要返回,并且查看他们依赖的文件是否相关。
+        特别注意
+        1. 如果用户的query里 @文件 或者 @@符号,那么被@的文件或者@@的符号必须要返回,并且查看他们依赖的文件是否相关。
+        2. 如果 query 里是一段历史对话,那么对话里的内容提及的文件路径必须要返回。
+        3. json格式数据不允许有注释
         '''
         file_meta_str = "\n".join([f"##[{index}]{item.module_name}\n{item.symbols}" for index,item in enumerate(file_meta_list)])
         context = {
@@ -66,15 +69,13 @@ class QuickFilter():
 
     def filter(self, index_items: List[IndexItem], query: str) -> Dict[str, TargetFile]:
         final_files: Dict[str, TargetFile] = {}
-        if not self.args.skip_filter_index and self.args.index_filter_model:
+        if not self.args.skip_filter_index and self.index_manager.llm.get_sub_client("index_filter_model"):
             start_time = time.monotonic()
             index_items = self.index_manager.read_index()
 
-            prompt_str = self.quick_filter_files.prompt(index_items,query)
-
-            print(prompt_str)
+            prompt_str = self.quick_filter_files.prompt(index_items,query)
 
-            tokens_len = count_tokens(prompt_str)
+            tokens_len = count_tokens(prompt_str)
 
             if tokens_len > 55*1024:
                 logger.warning(f"Quick filter prompt is too long, tokens_len: {tokens_len}/{55*1024} fallback to normal filter")
autocoder/index/index.py CHANGED
@@ -15,7 +15,8 @@ import threading
 import byzerllm
 import hashlib
 
-from loguru import logger
+from autocoder.common.printer import Printer
+from autocoder.common.auto_coder_lang import get_message
 from autocoder.index.types import (
     IndexItem,
     TargetFile,
@@ -48,6 +49,7 @@ class IndexManager:
         self.max_input_length = (
             args.index_model_max_input_length or args.model_max_input_length
         )
+        self.printer = Printer()
 
         # Create the index directory if it does not exist
         if not os.path.exists(self.index_dir):
@@ -206,8 +208,12 @@ class IndexManager:
         start_time = time.monotonic()
         source_code = source.source_code
         if len(source.source_code) > self.max_input_length:
-            logger.warning(
-                f"Warning[Build Index]: The length of source code({source.module_name}) is too long ({len(source.source_code)}) > model_max_input_length({self.max_input_length}), splitting into chunks..."
+            self.printer.print_in_terminal(
+                "index_file_too_large",
+                style="yellow",
+                file_path=source.module_name,
+                file_size=len(source.source_code),
+                max_length=self.max_input_length
             )
             chunks = self.split_text_into_chunks(
                 source_code, self.max_input_length - 1000
@@ -224,12 +230,23 @@ class IndexManager:
                 self.index_llm).run(source.module_name, source_code)
             time.sleep(self.anti_quota_limit)
 
-            logger.info(
-                f"Parse and update index for {file_path} md5: {md5} took {time.monotonic() - start_time:.2f}s"
+            self.printer.print_in_terminal(
+                "index_update_success",
+                style="green",
+                file_path=file_path,
+                md5=md5,
+                duration=time.monotonic() - start_time
             )
 
         except Exception as e:
-            logger.warning(f"Error: {e}")
+            # import traceback
+            # traceback.print_exc()
+            self.printer.print_in_terminal(
+                "index_build_error",
+                style="red",
+                file_path=file_path,
+                error=str(e)
+            )
             return None
 
         return {
@@ -255,8 +272,11 @@ class IndexManager:
 
         for item in index_data.keys():
             if not item.startswith(self.source_dir):
-                logger.warning(
-                    error_message(source_dir=self.source_dir, file_path=item)
+                self.printer.print_in_terminal(
+                    "index_source_dir_mismatch",
+                    style="yellow",
+                    source_dir=self.source_dir,
+                    file_path=item
                 )
                 break
 
@@ -291,8 +311,12 @@ class IndexManager:
         counter = 0
         num_files = len(wait_to_build_files)
         total_files = len(self.sources)
-        logger.info(
-            f"Total Files: {total_files}, Need to Build Index: {num_files}")
+        self.printer.print_in_terminal(
+            "index_build_summary",
+            style="bold blue",
+            total_files=total_files,
+            num_files=num_files
+        )
 
         futures = [
             executor.submit(self.build_index_for_single_source, source)
@@ -302,7 +326,12 @@ class IndexManager:
             result = future.result()
             if result is not None:
                 counter += 1
-                logger.info(f"Building Index:{counter}/{num_files}...")
+                self.printer.print_in_terminal(
+                    "building_index_progress",
+                    style="blue",
+                    counter=counter,
+                    num_files=num_files
+                )
                 module_name = result["module_name"]
                 index_data[module_name] = result
                 updated_sources.append(module_name)
@@ -404,8 +433,10 @@ class IndexManager:
                 with lock:
                     all_results.extend(result.file_list)
             else:
-                logger.warning(
-                    f"Fail to find related files for chunk {chunk_count}. This may be caused by the model limit or the query not being suitable for the files."
+                self.printer.print_in_terminal(
+                    "index_related_files_fail",
+                    style="yellow",
+                    chunk_count=chunk_count
                 )
             time.sleep(self.args.anti_quota_limit)
 
@@ -442,8 +473,10 @@ class IndexManager:
                 all_results.extend(result.file_list)
                 completed_threads += 1
             else:
-                logger.warning(
-                    f"Fail to find target files for chunk. This is caused by the model response not being in JSON format or the JSON being empty."
+                self.printer.print_in_terminal(
+                    "index_related_files_fail",
+                    style="yellow",
+                    chunk_count="unknown"
                 )
             time.sleep(self.args.anti_quota_limit)
 
@@ -457,7 +490,12 @@ class IndexManager:
         for future in as_completed(futures):
             future.result()
 
-        logger.info(f"Completed {completed_threads}/{total_threads} threads")
+        self.printer.print_in_terminal(
+            "index_threads_completed",
+            style="green",
+            completed_threads=completed_threads,
+            total_threads=total_threads
+        )
         return all_results, total_threads, completed_threads
 
     def get_target_files_by_query(self, query: str) -> FileList:
autocoder/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.233"
+__version__ = "0.1.235"