auto-coder 0.1.245__py3-none-any.whl → 0.1.247__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of auto-coder might be problematic.

auto_coder-0.1.245.dist-info/METADATA → auto_coder-0.1.247.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.245
+Version: 0.1.247
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
auto_coder-0.1.245.dist-info/RECORD → auto_coder-0.1.247.dist-info/RECORD CHANGED
@@ -1,17 +1,17 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/auto_coder.py,sha256=832zmnXmZ7RlqsPnuEj-2IuhsJI0QkCGHJICH6QRCAY,63333
+autocoder/auto_coder.py,sha256=tElv9VAvv1hfb9qt8R9EFbXqhYs6pm89cy4mPIFFUQE,63127
 autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
 autocoder/auto_coder_rag.py,sha256=illKgzP2bv-Tq50ujsofJnOHdI4pzr0ALtfR8NHHWdQ,22351
 autocoder/auto_coder_rag_client_mcp.py,sha256=WV7j5JUiQge0x4-B7Hp5-pSAFXLbvLpzQMcCovbauIM,6276
 autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
 autocoder/auto_coder_server.py,sha256=XU9b4SBH7zjPPXaTWWHV4_zJm-XYa6njuLQaplYJH_c,20290
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
-autocoder/chat_auto_coder.py,sha256=SLJQzXuQoj_2mcdbANG93ZM2-wIQsptA_h3VRv5xZAQ,105926
-autocoder/chat_auto_coder_lang.py,sha256=gbpjfMd1wYiIrOlLDc-G7eI497mMwjM_ud9GvO-wo9k,15261
+autocoder/chat_auto_coder.py,sha256=OUpoT-kUNoDS5VlO-Tui9zztd2ACyVa_kYQueBhb294,106724
+autocoder/chat_auto_coder_lang.py,sha256=C0QJkyIhW-9aksHznrcATf0JdI1uKqOr46zW4RZ2ljA,15759
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
-autocoder/models.py,sha256=7Z97Hzc_26dZG_wm6M2f9TL1ZxzzIN649U_Z0-m28EU,5342
-autocoder/version.py,sha256=sncGdxYQvG5ZX4oQL4xUEZ96_LS3u1YIOVwACJUeMF4,23
+autocoder/models.py,sha256=_9Kc8oS_tnnqKzRGnybgOfn1NOey7OXZ8y9qhBmgiB4,5517
+autocoder/version.py,sha256=rZZLAPZf903zmHSN4JzYQtRLpVsQe-LUkE0nelZEb6g,23
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -29,12 +29,12 @@ autocoder/common/__init__.py,sha256=2isE_u4VgfogwmcUCnFcussVFlzeNOLHDMFm5z_axbU,
 autocoder/common/anything2images.py,sha256=0ILBbWzY02M-CiWB-vzuomb_J1hVdxRcenAfIrAXq9M,25283
 autocoder/common/anything2img.py,sha256=4TREa-sOA-iargieUy7MpyCYVUE-9Mmq0wJtwomPqnE,7662
 autocoder/common/audio.py,sha256=Kn9nWKQddWnUrAz0a_ZUgjcu4VUU_IcZBigT7n3N3qc,7439
-autocoder/common/auto_coder_lang.py,sha256=7sr3Dz43ASeWYLtMvkE6tMd8dWDPZBJOxNVQ8rC54Js,13963
+autocoder/common/auto_coder_lang.py,sha256=EBX8EVxoV6MxVFLYd_4kB7ZZyZN5akqBuG6A2jHeiX8,14912
 autocoder/common/buildin_tokenizer.py,sha256=L7d5t39ZFvUd6EoMPXUhYK1toD0FHlRH1jtjKRGokWU,1236
 autocoder/common/chunk_validation.py,sha256=BrR_ZWavW8IANuueEE7hS8NFAwEvm8TX34WnPx_1hs8,3030
 autocoder/common/cleaner.py,sha256=NU72i8C6o9m0vXExab7nao5bstBUsfJFcj11cXa9l4U,1089
 autocoder/common/code_auto_execute.py,sha256=4KXGmiGObr_B1d6tzV9dwS6MifCSc3Gm4j2d6ildBXQ,6867
-autocoder/common/code_auto_generate.py,sha256=5lEW9iudGYQIcd_QjrGyGaEmrWuyZ625PNVF6XX2rNs,10308
+autocoder/common/code_auto_generate.py,sha256=6rrtdEz8JhlKNutEqlWbgKUlACk90Lcm0UebtuAKEQ0,10320
 autocoder/common/code_auto_generate_diff.py,sha256=o5yeqpc3WXSRWlcLzhlwJNosKo7dcj0CeIsFh6Aibus,16248
 autocoder/common/code_auto_generate_editblock.py,sha256=QdUHUkGaervvQNCY8T2vQ_tfnQX_2kxxu4qq_QW_Nn8,17828
 autocoder/common/code_auto_generate_strict_diff.py,sha256=uteWDEHfIbrnVgwKgqC7qwrIeW0enJCXcHzZGa48yY8,14774
@@ -42,13 +42,13 @@ autocoder/common/code_auto_merge.py,sha256=8dtnz61l0B5gNbQmx26TZ4_jD825dsnnWtAFD
 autocoder/common/code_auto_merge_diff.py,sha256=yocfe8s3Pz6hTGDUl9wRIewY3NcTize_gEla64lsGT0,15331
 autocoder/common/code_auto_merge_editblock.py,sha256=sp7C0fZJMVcNgI8uWy43CKDk7gGXFTkMB9kbP2VdY8k,17485
 autocoder/common/code_auto_merge_strict_diff.py,sha256=9rm0NJ_n6M3LohEX7xl1Jym0xmm8UEYqj_ZTSO3oSlM,9519
-autocoder/common/code_modification_ranker.py,sha256=l0OAR7ad0hTV3xdfn7rO0KqnY5Y5qSG4BmWcClZ9RUQ,6104
+autocoder/common/code_modification_ranker.py,sha256=TycYdtxfNfde2N9-9jEB-9IL2Q3PoHS-k-r9JqZYD6s,6545
 autocoder/common/command_completer.py,sha256=SSeb8MDH0JPvfdyW-S2uaHnui4VBDfSQvQPLbv3ORPA,9314
 autocoder/common/command_generator.py,sha256=v4LmU7sO-P7jEZIXCWHUC6P-vT7AvBi_x_PTwCqBAE8,1323
 autocoder/common/command_templates.py,sha256=mnB3n8i0yjH1mqzyClEg8Wpr9VbZV44kxky66Zu6OJY,8557
 autocoder/common/const.py,sha256=eTjhjh4Aj4CUzviJ81jaf3Y5cwqsLATySn2wJxaS6RQ,2911
 autocoder/common/files.py,sha256=CguxG9digkWBJpRaILErZmL_G5ryPRahPmPFWGB7X18,1973
-autocoder/common/git_utils.py,sha256=btK45sxvfm4tX3fBRNUPRZoGQuZuOEQrWSAwLy1yoLw,23095
+autocoder/common/git_utils.py,sha256=zxgQt2PukabV_21podylAUzTY7Xk60bsQ7MQYw4s-Tg,23234
 autocoder/common/image_to_page.py,sha256=O0cNO_vHHUP-fP4GXiVojShmNqkPnZXeIyiY1MRLpKg,13936
 autocoder/common/interpreter.py,sha256=62-dIakOunYB4yjmX8SHC0Gdy2h8NtxdgbpdqRZJ5vk,2833
 autocoder/common/llm_rerank.py,sha256=FbvtCzaR661Mt2wn0qsuiEL1Y3puD6jeIJS4zg_e7Bs,3260
@@ -78,14 +78,14 @@ autocoder/dispacher/actions/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
 autocoder/dispacher/actions/plugins/action_regex_project.py,sha256=ht_HWzZt84IEogoFMggnXI6aFFerrsuksVflAkcodfU,5545
 autocoder/dispacher/actions/plugins/action_translate.py,sha256=nVAtRSQpdGNmZxg1R_9zXG3AuTv3CHf2v7ODgj8u65c,7727
 autocoder/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/index/entry.py,sha256=oFdSJW4ypc3_mhE3fk2O9UB34XMcq3a1Sp2MeTlfQ2o,11820
+autocoder/index/entry.py,sha256=hXSOi-jPgRBKQB55eqWkT95vxvWrbVHDuIMtDkqjNNw,12232
 autocoder/index/for_command.py,sha256=BFvljE4t6VaMBGboZAuhUCzVK0EitCy_n5D_7FEnihw,3204
-autocoder/index/index.py,sha256=VjfcBYHywU4tjQTA7mpHfzRM8nBPhPHrUnkuBbsj6do,20409
+autocoder/index/index.py,sha256=xwh22nY0TtEJMJwYjOUd6xdRZozYXBt47YSwheZSP-4,20679
 autocoder/index/symbols_utils.py,sha256=CjcjUVajmJZB75Ty3a7kMv1BZphrm-tIBAdOJv6uo-0,2037
 autocoder/index/types.py,sha256=a2s_KV5FJlq7jqA2ELSo9E1sjuLwDB-JJYMhSpzBAhU,596
 autocoder/index/filter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/index/filter/normal_filter.py,sha256=V0MAUKgEG9vVTwZK5lMfpZCjU57S6cBioeHDjog0kLs,7992
-autocoder/index/filter/quick_filter.py,sha256=Omvsz9O1xQEH4xP-wNuCZhxn69P7Y59SiLPUIDuGFiA,3851
+autocoder/index/filter/normal_filter.py,sha256=APu34iSvWhtlLtWgkj8N3Vo4oW1TegtZQq2bwDX_cs4,8031
+autocoder/index/filter/quick_filter.py,sha256=k0c3liMC6M7tP2rGorZU1DJu2mqQlc_URf3BAsSWqcY,5490
 autocoder/pyproject/__init__.py,sha256=dQ2_7YZ7guybT9BhfxSGn43eLQJGQN2zgeKa6--JlaQ,14403
 autocoder/rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/rag/api_server.py,sha256=dRbhAZVRAOlZ64Cnxf4_rKb4iJwHnrWS9Zr67IVORw0,7288
@@ -121,7 +121,7 @@ autocoder/rag/stream_event/types.py,sha256=rtLwOE8rShmi1dJdxyBpAV5ZjLBGG9vptMiSz
 autocoder/regex_project/__init__.py,sha256=EBZeCL5ORyD_9_5u_UuG4s7XtpXOu0y1sWDmxWFtufE,6781
 autocoder/regexproject/__init__.py,sha256=cEr-ZOaQjLD5sx7T7F2DhD5ips03HcJ02rded9EpSXc,9693
 autocoder/suffixproject/__init__.py,sha256=VcXjUbGf3uQrpoqVCItDvGG9DoeHJ_qEmghKwrVNw9w,11058
-autocoder/tsproject/__init__.py,sha256=yloVzkGLnbTd4Hcj9fMO-rcjNTTx4wI3Ga41LWOSYrY,11747
+autocoder/tsproject/__init__.py,sha256=boNuRCHi94xI_y4tvL5LKgSZ4gYxcPqUUQTw9MU_STI,11751
 autocoder/utils/__init__.py,sha256=KtcGElFNBgZPF7dEL8zF9JpXkCAjoyDrzaREJBhJrcs,994
 autocoder/utils/_markitdown.py,sha256=RU88qn4eZfYIy0GDrPxlI8oYXIypbi63VRJjdlnE0VU,47431
 autocoder/utils/coder.py,sha256=rK8e0svQBe0NOP26dIGToUXgha_hUDgxlWoC_p_r7oc,5698
@@ -139,11 +139,11 @@ autocoder/utils/rest.py,sha256=hLBhr78y-WVnV0oQf9Rxc22EwqF78KINkScvYa1MuYA,6435
 autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
 autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=8OV1VOdbj8O7JXDrBUhXVPmMkoCd9n-hvshtR2XXYxk,9112
+autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=5MCtQwQDttvr0pRcYlYERFQpQAdi1Yvyf5Hm6F-_jvQ,9433
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.245.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-auto_coder-0.1.245.dist-info/METADATA,sha256=OtK6YNCP5q11TSBcEWYhfDeljzTN7OqNYW4ze_mggmk,2616
-auto_coder-0.1.245.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-auto_coder-0.1.245.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
-auto_coder-0.1.245.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
-auto_coder-0.1.245.dist-info/RECORD,,
+auto_coder-0.1.247.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.247.dist-info/METADATA,sha256=hS2KU58cmjBGHdqYM4e--IBprziX3RJUeLDBZnZEySA,2616
+auto_coder-0.1.247.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.247.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.247.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.247.dist-info/RECORD,,
autocoder/auto_coder.py CHANGED
@@ -276,7 +276,7 @@ def main(input_args: Optional[List[str]] = None):
     )
     byzerllm.connect_cluster(address=args.ray_address)

-    llm = byzerllm.ByzerLLM(verbose=args.print_request)
+    llm = byzerllm.ByzerLLM(verbose=args.print_request)

    # code_model,index_filter_model,generate_rerank_model,chat_model
    # 这四个模型如果用户没有设置,就会使用默认的
@@ -300,86 +300,66 @@
     llm.setup_sub_client("chat_model", chat_model)

     if args.product_mode == "lite":
-        default_model = args.model
+        default_model = args.model
+        model_info = models_module.get_model_by_name(default_model)
         llm = byzerllm.SimpleByzerLLM(default_model_name=default_model)
-        api_key_dir = os.path.expanduser("~/.auto-coder/keys")
-        api_key_file = os.path.join(api_key_dir, "api.deepseek.com")
-
-        if not os.path.exists(api_key_file):
-            raise Exception(f"API key file not found: {api_key_file}")
-
-        with open(api_key_file, "r") as f:
-            api_key = f.read()
-
         llm.deploy(
             model_path="",
-            pretrained_model_type="saas/openai",
-            udf_name=default_model,
+            pretrained_model_type=model_info["model_type"],
+            udf_name=args.model,
             infer_params={
-                "saas.base_url": "https://api.deepseek.com/v1",
-                "saas.api_key": api_key,
-                "saas.model": "deepseek-chat",
-                "saas.is_reasoning": False
+                "saas.base_url": model_info["base_url"],
+                "saas.api_key": model_info["api_key"],
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
-        )
-
-        code_llm = byzerllm.SimpleByzerLLM(default_model)
-        code_llm.deploy(
-            model_path="",
-            pretrained_model_type="saas/openai",
-            udf_name=default_model,
-            infer_params={
-                "saas.base_url": "https://api.deepseek.com/v1",
-                "saas.api_key": api_key,
-                "saas.model": "deepseek-chat",
-                "saas.is_reasoning": False
-            }
-        )
-
-        chat_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-        chat_llm.deploy(
-            model_path="",
-            pretrained_model_type="saas/openai",
-            udf_name="deepseek_r1_chat",
-            infer_params={
-                "saas.base_url": "https://api.deepseek.com/v1",
-                "saas.api_key": api_key,
-                "saas.model": "deepseek-reasoner",
-                "saas.is_reasoning": True
-            }
-        )
+        )
+
+        if models_module.check_model_exists("deepseek_r1_chat"):
+            r1_model_info = models_module.get_model_by_name("deepseek_r1_chat")
+            api_key = r1_model_info["api_key"]
+            chat_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
+            chat_llm.deploy(
+                model_path="",
+                pretrained_model_type="saas/openai",
+                udf_name="deepseek_r1_chat",
+                infer_params={
+                    "saas.base_url": "https://api.deepseek.com/v1",
+                    "saas.api_key": api_key,
+                    "saas.model": "deepseek-reasoner",
+                    "saas.is_reasoning": True
+                }
+            )

-        generate_rerank_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-        generate_rerank_llm.deploy(
-            model_path="",
-            pretrained_model_type="saas/openai",
-            udf_name="deepseek_r1_chat",
-            infer_params={
-                "saas.base_url": "https://api.deepseek.com/v1",
-                "saas.api_key": api_key,
-                "saas.model": "deepseek-reasoner",
-                "saas.is_reasoning": True
-            }
-        )
+            generate_rerank_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
+            generate_rerank_llm.deploy(
+                model_path="",
+                pretrained_model_type="saas/openai",
+                udf_name="deepseek_r1_chat",
+                infer_params={
+                    "saas.base_url": "https://api.deepseek.com/v1",
+                    "saas.api_key": api_key,
+                    "saas.model": "deepseek-reasoner",
+                    "saas.is_reasoning": True
+                }
+            )

-        index_filter_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-        index_filter_llm.deploy(
-            model_path="",
-            pretrained_model_type="saas/openai",
-            udf_name="deepseek_r1_chat",
-            infer_params={
-                "saas.base_url": "https://api.deepseek.com/v1",
-                "saas.api_key": api_key,
-                "saas.model": "deepseek-reasoner",
-                "saas.is_reasoning": True
-            }
-        )
-
-        # 这四个模型如果用户没有设置,就会使用默认的
-        llm.setup_sub_client("code_model", code_llm)
-        llm.setup_sub_client("chat_model", chat_llm)
-        llm.setup_sub_client("generate_rerank_model", generate_rerank_llm)
-        llm.setup_sub_client("index_filter_model", index_filter_llm)
+            index_filter_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
+            index_filter_llm.deploy(
+                model_path="",
+                pretrained_model_type="saas/openai",
+                udf_name="deepseek_r1_chat",
+                infer_params={
+                    "saas.base_url": "https://api.deepseek.com/v1",
+                    "saas.api_key": api_key,
+                    "saas.model": "deepseek-reasoner",
+                    "saas.is_reasoning": True
+                }
+            )
+
+            llm.setup_sub_client("chat_model", chat_llm)
+            llm.setup_sub_client("generate_rerank_model", generate_rerank_llm)
+            llm.setup_sub_client("index_filter_model", index_filter_llm)

     if args.product_mode == "lite":
         # Set up default models based on configuration
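
In lite mode, 0.1.247 stops reading a hardcoded DeepSeek key file and instead resolves every deployment parameter from the model registry via `models_module.get_model_by_name`. A minimal sketch of that lookup-driven flow with the registry stubbed in memory: the entry keys mirror what the diff reads (`model_type`, `base_url`, `api_key`, `model_name`, `is_reasoning`), but the stub registry and `build_deploy_kwargs` are illustrative, not the package API.

```python
from typing import Any, Dict

# Hypothetical stand-in for the registry that autocoder.models reads;
# the keys mirror what the new lite-mode branch consumes.
REGISTRY = [
    {
        "name": "deepseek_chat",
        "model_type": "saas/openai",
        "base_url": "https://api.deepseek.com/v1",
        "api_key": "sk-...",  # placeholder, not a real key
        "model_name": "deepseek-chat",
        "is_reasoning": False,
    },
]

def get_model_by_name(name: str) -> Dict[str, Any]:
    # Same contract as autocoder.models.get_model_by_name:
    # return the first match, raise when nothing matches.
    matches = [m for m in REGISTRY if m["name"] == name.strip()]
    if not matches:
        raise Exception(f"model not found: {name}")
    return matches[0]

def build_deploy_kwargs(model_info: Dict[str, Any]) -> Dict[str, Any]:
    # Shapes the same keyword arguments the diff passes to llm.deploy().
    return {
        "model_path": "",
        "pretrained_model_type": model_info["model_type"],
        "infer_params": {
            "saas.base_url": model_info["base_url"],
            "saas.api_key": model_info["api_key"],
            "saas.model": model_info["model_name"],
            "saas.is_reasoning": model_info["is_reasoning"],
        },
    }

if __name__ == "__main__":
    print(build_deploy_kwargs(get_model_by_name("deepseek_chat")))
```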
@@ -1359,7 +1339,7 @@ def main(input_args: Optional[List[str]] = None):
        elif "review_commit" in args.action:
            from autocoder.agent.auto_review_commit import AutoReviewCommit
            reviewer = AutoReviewCommit(llm=chat_llm, args=args)
-           v = reviewer.review_commit(args.query)
+           v = reviewer.review_commit(query=args.query,conversations=loaded_conversations)
        else:
            v = stream_chat_with_continue(
                llm=chat_llm,
@@ -1367,12 +1347,16 @@
                llm_config={}
            )

-
+
+           model_name = getattr(chat_llm, 'default_model_name', None)
+           if not model_name:
+               model_name = "unknown(without default model name)"

            assistant_response, last_meta = stream_out(
                v,
                request_id=args.request_id,
-               console=console
+               console=console,
+               model_name=model_name
            )

            # 打印耗时和token统计
@@ -1712,14 +1712,24 @@ def commit(query: str):
        finally:
            if os.path.exists(temp_yaml):
                os.remove(temp_yaml)
-
-       llm = get_single_llm(args.code_model or args.model, product_mode)
-       uncommitted_changes = git_utils.get_uncommitted_changes(".")
-       commit_message = git_utils.generate_commit_message.with_llm(llm).run(
-           uncommitted_changes
-       )
-       memory["conversation"].append(
-           {"role": "user", "content": commit_message})
+
+       target_model = args.code_model or args.model
+       llm = get_single_llm(target_model, product_mode)
+       printer = Printer()
+       printer.print_in_terminal("commit_generating", style="yellow", model_name=target_model)
+       commit_message = ""
+
+       try:
+           uncommitted_changes = git_utils.get_uncommitted_changes(".")
+           commit_message = git_utils.generate_commit_message.with_llm(llm).run(
+               uncommitted_changes
+           )
+           memory["conversation"].append(
+               {"role": "user", "content": commit_message})
+       except Exception as e:
+           printer.print_in_terminal("commit_failed", style="red", error=str(e), model_name=target_model)
+           return
+
        yaml_config["query"] = commit_message
        yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)
        with open(os.path.join(execute_file), "w") as f:
@@ -1732,6 +1742,8 @@ def commit(query: str):
                ".", f"auto_coder_{file_name}_{md5}"
            )
            git_utils.print_commit_info(commit_result=commit_result)
+           if commit_message:
+               printer.print_in_terminal("commit_message", style="green", model_name=target_model, message=commit_message)
    except Exception as e:
        import traceback
        traceback.print_exc()
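
The commit path now announces the generating model up front and converts a failed LLM call into a printed error plus an early return, instead of letting the exception abort the whole commit flow. A rough sketch of that guard shape; `commit_with_guard` and the bare `print` calls are stand-ins for the real `commit()`/`Printer` wiring, not the package API.

```python
from typing import Callable, Optional

def commit_with_guard(target_model: str,
                      generate_message: Callable[[], str]) -> Optional[str]:
    # Mirrors the new try/except in commit(): announce, attempt,
    # and bail out with a printed error instead of raising.
    print(f"[{target_model}] Generating commit message...")
    try:
        commit_message = generate_message()
    except Exception as e:
        print(f"[{target_model}] Failed to generate commit message: {e}")
        return None
    print(f"[{target_model}] Generated commit message: {commit_message}")
    return commit_message

def _failing() -> str:
    raise RuntimeError("quota exceeded")

if __name__ == "__main__":
    commit_with_guard("deepseek_chat", lambda: "feat: wire model registry into lite mode")
    commit_with_guard("deepseek_chat", _failing)  # prints the failure, returns None
```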
@@ -1875,8 +1887,8 @@ def coding(query: str):
    @byzerllm.prompt()
    def code_review(query: str) -> str:
        """
-       对代码进行review,参考如下检查点。
-       1. 有没有调用不符合方法,类的签名的调用
+       掐面提供了上下文,对代码进行review,参考如下检查点。
+       1. 有没有调用不符合方法,类的签名的调用,包括对第三方类,模块,方法的检查(如果上下文提供了这些信息)
        2. 有没有未声明直接使用的变量,方法,类
        3. 有没有明显的语法错误
        4. 如果是python代码,检查有没有缩进方面的错误
@@ -2706,9 +2718,10 @@ def main():
            memory["mode"] = "normal"

        # 处理 user_input 的空格
-       temp_user_input = user_input.lstrip() # 去掉左侧空格
-       if temp_user_input.startswith('/'):
-           user_input = temp_user_input
+       if user_input:
+           temp_user_input = user_input.lstrip() # 去掉左侧空格
+           if temp_user_input.startswith('/'):
+               user_input = temp_user_input

        if (
            memory["mode"] == "auto_detect"
autocoder/chat_auto_coder_lang.py CHANGED
@@ -113,9 +113,15 @@ MESSAGES = {
        "remove_files_none": "No files were removed.",
        "files_removed": "Files Removed",
        "models_api_key_empty": "Warning : {{name}} API key is empty. Please set a valid API key.",
+       "commit_generating": "{{ model_name }} Generating commit message...",
+       "commit_message": "{{ model_name }} Generated commit message: {{ message }}",
+       "commit_failed": "{{ model_name }} Failed to generate commit message: {{ error }}",
    },
    "zh": {
-       "mcp_remove_error": "移除 MCP 服务器时出错:{error}",
+       "commit_generating": "{{ model_name }} 正在生成提交信息...",
+       "commit_message": "{{ model_name }} 生成的提交信息: {{ message }}",
+       "commit_failed": "{{ model_name }} 生成提交信息失败: {{ error }}",
+       "mcp_remove_error": "移除 MCP 服务器时出错:{error}",
        "mcp_remove_success": "成功移除 MCP 服务器:{result}",
        "mcp_list_running_error": "列出运行中的 MCP 服务器时出错:{error}",
        "mcp_list_running_title": "正在运行的 MCP 服务器:",
autocoder/common/auto_coder_lang.py CHANGED
@@ -14,8 +14,8 @@ MESSAGES = {
        "no_latest_commit": "Unable to get latest commit information",
        "code_review_error": "Code review process error: {{error}}",
        "index_file_too_large": "⚠️ File {{ file_path }} is too large ({{ file_size }} > {{ max_length }}), splitting into chunks...",
-       "index_update_success": "✅ Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s",
-       "index_build_error": "❌ Error building index for {{ file_path }}: {{ error }}",
+       "index_update_success": "✅ {{ model_name }} Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s",
+       "index_build_error": "❌ {{ model_name }} Error building index for {{ file_path }}: {{ error }}",
        "index_build_summary": "📊 Total Files: {{ total_files }}, Need to Build Index: {{ num_files }}",
        "building_index_progress": "⏳ Building Index: {{ counter }}/{{ num_files }}...",
        "index_source_dir_mismatch": "⚠️ Source directory mismatch (file_path: {{ file_path }}, source_dir: {{ source_dir }})",
@@ -53,8 +53,8 @@ MESSAGES = {
        "code_generation_complete": "Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}",
        "code_merge_start": "Auto merge the code...",
        "code_execution_warning": "Content(send to model) is {{ content_length }} tokens (you may collect too much files), which is larger than the maximum input length {{ max_length }}",
-       "quick_filter_start": "Starting filter context(quick_filter)...",
-       "normal_filter_start": "Starting filter context(normal_filter)...",
+       "quick_filter_start": "{{ model_name }} Starting filter context(quick_filter)...",
+       "normal_filter_start": "{{ model_name }} Starting filter context(normal_filter)...",
        "pylint_check_failed": "⚠️ Pylint check failed: {{ error_message }}",
        "pylint_error": "❌ Error running pylint: {{ error_message }}",
        "unmerged_blocks_warning": "⚠️ Found {{ num_blocks }} unmerged blocks, the changes will not be applied. Please review them manually then try again.",
@@ -65,7 +65,7 @@ MESSAGES = {
        "merge_failed": "❌ Merge file {{ path }} failed: {{ error }}",
        "files_merged_total": "✅ Merged {{ total }} files into the project.",
        "ranking_skip": "Only 1 candidate, skip ranking",
-       "ranking_start": "Start ranking {{ count }} candidates",
+       "ranking_start": "Start ranking {{ count }} candidates using model {{ model_name }}",
        "ranking_failed_request": "Ranking request failed: {{ error }}",
        "ranking_all_failed": "All ranking requests failed",
        "ranking_complete": "Ranking completed in {{ elapsed }}s, total voters: {{ total_tasks }}, best candidate index: {{ best_candidate }}, scores: {{ scores }}, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}",
@@ -73,13 +73,17 @@ MESSAGES = {
        "ranking_failed": "Ranking failed in {{ elapsed }}s, using original order",
        "begin_index_source_code": "🚀 Begin to index source code in {{ source_dir }}",
        "stream_out_stats": "Elapsed time {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
+       "quick_filter_stats": "快速过滤器完成,耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
        "upsert_file": "✅ Updated file: {{ file_path }}",
-       "unmerged_blocks_title": "Unmerged Blocks",
+       "unmerged_blocks_title": "Unmerged Blocks",
+       "quick_filter_title": "{{ model_name }} is analyzing how to filter context...",
+       "quick_filter_failed": "❌ Quick filter failed: {{ error }}. ",
        "unmerged_file_path": "File: {{file_path}}",
        "unmerged_search_block": "Search Block({{similarity}}):",
        "unmerged_replace_block": "Replace Block:",
        "unmerged_blocks_total": "Total unmerged blocks: {{num_blocks}}",
-       "git_init_required": "⚠️ auto_merge only applies to git repositories.\n\nPlease try using git init in the source directory:\n\n```shell\ncd {{ source_dir }}\ngit init.\n```\n\nThen run auto - coder again.\nError: {{ error }}"
+       "git_init_required": "⚠️ auto_merge only applies to git repositories.\n\nPlease try using git init in the source directory:\n\n```shell\ncd {{ source_dir }}\ngit init.\n```\n\nThen run auto - coder again.\nError: {{ error }}",
+       "quick_filter_reason": "Auto get(quick_filter mode)"
    },
    "zh": {
        "model_not_found": "未找到模型: {{model_name}}",
@@ -93,8 +97,8 @@ MESSAGES = {
        "no_latest_commit": "无法获取最新的提交信息",
        "code_review_error": "代码审查过程出错: {{error}}",
        "index_file_too_large": "⚠️ 文件 {{ file_path }} 过大 ({{ file_size }} > {{ max_length }}), 正在分块处理...",
-       "index_update_success": "✅ 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} 秒",
-       "index_build_error": "❌ 构建 {{ file_path }} 索引时出错: {{ error }}",
+       "index_update_success": "✅ {{ model_name }} 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} 秒",
+       "index_build_error": "❌ {{ model_name }} 构建 {{ file_path }} 索引时出错: {{ error }}",
        "index_build_summary": "📊 总文件数: {{ total_files }}, 需要构建索引: {{ num_files }}",
        "building_index_progress": "⏳ 正在构建索引: {{ counter }}/{{ num_files }}...",
        "index_source_dir_mismatch": "⚠️ 源目录不匹配 (文件路径: {{ file_path }}, 源目录: {{ source_dir }})",
@@ -132,8 +136,8 @@ MESSAGES = {
        "code_generation_complete": "代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
        "code_merge_start": "正在自动合并代码...",
        "code_execution_warning": "发送给模型的内容长度为 {{ content_length }} tokens(您可能收集了太多文件),超过了最大输入长度 {{ max_length }}",
-       "quick_filter_start": "开始查找上下文(quick_filter)...",
-       "normal_filter_start": "开始查找上下文(normal_filter)...",
+       "quick_filter_start": "{{ model_name }} 开始查找上下文(quick_filter)...",
+       "normal_filter_start": "{{ model_name }} 开始查找上下文(normal_filter)...",
        "pylint_check_failed": "⚠️ Pylint 检查失败: {{ error_message }}",
        "pylint_error": "❌ 运行 Pylint 时出错: {{ error_message }}",
        "begin_index_source_code": "🚀 开始为 {{ source_dir }} 中的源代码建立索引",
@@ -146,19 +150,23 @@ MESSAGES = {
        "unmerged_search_block": "Search Block({{similarity}}):",
        "unmerged_replace_block": "Replace Block:",
        "unmerged_blocks_total": "未合并代码块数量: {{num_blocks}}",
-       "git_init_required": "⚠️ auto_merge 仅适用于 git 仓库。\n\n请尝试在源目录中使用 git init:\n\n```shell\ncd {{ source_dir }}\ngit init.\n```\n\n然后再次运行 auto-coder。\n错误: {{ error }}",
+       "git_init_required": "⚠️ auto_merge 仅适用于 git 仓库。\n\n请尝试在源目录中使用 git init:\n\n```shell\ncd {{ source_dir }}\ngit init.\n```\n\n然后再次运行 auto-coder。\n错误: {{ error }}",
+       "quick_filter_reason": "自动获取(quick_filter模式)",
        "upsert_file": "✅ 更新文件: {{ file_path }}",
        "files_merged": "✅ 成功合并了 {{ total }} 个文件到项目中。",
        "merge_failed": "❌ 合并文件 {{ path }} 失败: {{ error }}",
        "files_merged_total": "✅ 合并了 {{ total }} 个文件到项目中。",
        "ranking_skip": "只有1个候选项,跳过排序",
-       "ranking_start": "开始对 {{ count }} 个候选项进行排序",
+       "ranking_start": "开始对 {{ count }} 个候选项进行排序,使用模型 {{ model_name }} 打分",
        "ranking_failed_request": "排序请求失败: {{ error }}",
        "ranking_all_failed": "所有排序请求都失败",
        "ranking_complete": "排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }}",
        "ranking_process_failed": "排序过程失败: {{ error }}",
        "ranking_failed": "排序失败,耗时 {{ elapsed }} 秒,使用原始顺序",
-       "stream_out_stats": "耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}"
+       "stream_out_stats": "耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
+       "quick_filter_stats": "Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
+       "quick_filter_title": "{{ model_name }} 正在分析如何筛选上下文...",
+       "quick_filter_failed": "❌ 快速过滤器失败: {{ error }}. ",
    },
}

autocoder/common/code_auto_generate.py CHANGED
@@ -7,6 +7,7 @@ from autocoder.common import sys_prompt
 from concurrent.futures import ThreadPoolExecutor
 from autocoder.common.types import CodeGenerateResult
 from autocoder.common.utils_code_auto_generate import chat_with_continue
+import json


 class CodeAutoGenerate:
autocoder/common/code_modification_ranker.py CHANGED
@@ -77,8 +77,15 @@ class CodeModificationRanker:
        with ThreadPoolExecutor(max_workers=total_tasks) as executor:
            # Submit tasks for each model and generate_times
            futures = []
-           for llm in self.llms:
+           for llm in self.llms:
+               model_name = getattr(llm, 'default_model_name', None)
+               if not model_name:
+                   model_name = "unknown(without default model name)"
+               self.printer.print_in_terminal(
+                   "ranking_start", style="blue", count=len(generate_result.contents), model_name=model_name)
+
                for _ in range(rank_times):
+
                    futures.append(
                        executor.submit(
                            chat_with_continue,
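
The ranker now prints a per-model `ranking_start` line (falling back to a placeholder when a client carries no `default_model_name`) before fanning the same ranking request out `rank_times` times per model across a thread pool. A stripped-down sketch of that fan-out shape; `score_once` is a hypothetical stand-in for the `chat_with_continue` call.

```python
from concurrent.futures import ThreadPoolExecutor, as_completed
import random

def score_once(model_name: str) -> int:
    # Hypothetical stand-in for chat_with_continue: one ranking vote.
    return random.randint(0, 10)

def rank(models: list, rank_times: int) -> list:
    total_tasks = len(models) * rank_times
    votes = []
    with ThreadPoolExecutor(max_workers=total_tasks) as executor:
        futures = []
        for llm in models:
            # Same fallback the diff uses when a client has no name set.
            name = getattr(llm, "default_model_name", None) or "unknown(without default model name)"
            print(f"Start ranking using model {name}")
            for _ in range(rank_times):
                futures.append(executor.submit(score_once, name))
        for future in as_completed(futures):
            votes.append(future.result())
    return votes

if __name__ == "__main__":
    class _Client:  # minimal object carrying the attribute the diff reads
        default_model_name = "deepseek_r1_chat"
    print(rank([_Client(), object()], rank_times=2))
```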
autocoder/common/git_utils.py CHANGED
@@ -262,7 +262,9 @@ def get_uncommitted_changes(repo_path: str) -> str:
 def generate_commit_message(changes_report: str) -> str:
    '''
    我是一个Git提交信息生成助手。我们的目标是通过一些变更报告,倒推用户的需求,将需求作为commit message。
-   commit message 需要简洁,不要超过100个字符。
+   commit message 需要简洁,包含两部分:
+   1. 这个commit 背后的需求是什么
+   2. 为了完成这个需求做了哪些事情

    下面是一些示例:
    <examples>
@@ -613,7 +615,7 @@ def print_commit_info(commit_result: CommitResult):

    table.add_row("Commit Hash", commit_result.commit_hash)
    table.add_row("Commit Message", commit_result.commit_message)
-   table.add_row("Changed Files", "\n".join(commit_result.changed_files))
+   table.add_row("Changed Files", "\n".join(commit_result.changed_files) if commit_result.changed_files else "No files changed")

    console.print(
        Panel(table, expand=False, border_style="green", title="Git Commit Summary")
autocoder/index/entry.py CHANGED
@@ -101,13 +101,19 @@ def build_index_and_filter_files(
            )
        )

-   if not args.skip_filter_index and args.index_filter_model:
-       printer.print_in_terminal("quick_filter_start", style="blue")
+   if not args.skip_filter_index and args.index_filter_model:
+       model_name = getattr(index_manager.index_filter_llm, 'default_model_name', None)
+       if not model_name:
+           model_name = "unknown(without default model name)"
+       printer.print_in_terminal("quick_filter_start", style="blue", model_name=model_name)
        quick_filter = QuickFilter(index_manager,stats,sources)
        final_files = quick_filter.filter(index_manager.read_index(),args.query)

    if not args.skip_filter_index and not args.index_filter_model:
-       printer.print_in_terminal("normal_filter_start", style="blue")
+       model_name = getattr(index_manager.llm, 'default_model_name', None)
+       if not model_name:
+           model_name = "unknown(without default model name)"
+       printer.print_in_terminal("normal_filter_start", style="blue",model_name=model_name)
        normal_filter = NormalFilter(index_manager,stats,sources)
        final_files = normal_filter.filter(index_manager.read_index(),args.query)

autocoder/index/filter/normal_filter.py CHANGED
@@ -60,7 +60,7 @@ class NormalFilter():
        phase_end = time.monotonic()
        self.stats["timings"]["normal_filter"]["level1_filter"] = phase_end - phase_start

-       # Phase 4: Level 2 filtering - Related files
+       # Phase 4: Level 2 filtering - Related files
        if target_files is not None and self.args.index_filter_level >= 2:
            logger.info(
                "Phase 4: Performing Level 2 filtering (related files)...")
@@ -84,13 +84,14 @@ class NormalFilter():
            phase_end = time.monotonic()
            self.stats["timings"]["normal_filter"]["level2_filter"] = phase_end - phase_start

-       if not final_files:
-           logger.warning("No related files found, using all files")
-           for source in self.sources:
-               final_files[get_file_path(source.module_name)] = TargetFile(
-                   file_path=source.module_name,
-                   reason="No related files found, use all files",
-               )
+       # if not final_files:
+       #     logger.warning("No related files found, using all files")
+       #     for source in self.sources:
+       #         final_files[get_file_path(source.module_name)] = TargetFile(
+       #             file_path=source.module_name,
+       #             reason="No related files found, use all files",
+       #         )
+

        # Phase 5: Relevance verification
        logger.info("Phase 5: Performing relevance verification...")
autocoder/index/filter/quick_filter.py CHANGED
@@ -1,4 +1,7 @@
-from typing import List, Union,Dict,Any
+from typing import List, Union, Dict, Any
+from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
+from autocoder.common.utils_code_auto_generate import stream_chat_with_continue
+from byzerllm.utils.str2model import to_model
 from autocoder.index.types import IndexItem
 from autocoder.common import AutoCoderArgs,SourceCode
 import byzerllm
@@ -11,6 +14,7 @@ from autocoder.index.types import (
 )
 from autocoder.rag.token_counter import count_tokens
 from loguru import logger
+from autocoder.common.printer import Printer


 def get_file_path(file_path):
@@ -25,6 +29,7 @@ class QuickFilter():
        self.args = index_manager.args
        self.stats = stats
        self.sources = sources
+       self.printer = Printer()

    @byzerllm.prompt()
    def quick_filter_files(self,file_meta_list:List[IndexItem],query:str) -> str:
@@ -69,6 +74,7 @@ class QuickFilter():

    def filter(self, index_items: List[IndexItem], query: str) -> Dict[str, TargetFile]:
        final_files: Dict[str, TargetFile] = {}
+
        if not self.args.skip_filter_index and self.args.index_filter_model:
            start_time = time.monotonic()
            index_items = self.index_manager.read_index()
@@ -82,17 +88,47 @@ class QuickFilter():
                return final_files

            try:
-               file_number_list = self.quick_filter_files.with_llm(
-                   self.index_manager.index_filter_llm).with_return_type(FileNumberList).run(index_items, self.args.query)
+               model_name = getattr(self.index_manager.index_filter_llm, 'default_model_name', None)
+               if not model_name:
+                   model_name = "unknown(without default model name)"
+
+               # 渲染 Prompt 模板
+               query = self.quick_filter_files.prompt(index_items, self.args.query)
+
+               # 使用流式输出处理
+               stream_generator = stream_chat_with_continue(
+                   self.index_manager.index_filter_llm,
+                   [{"role": "user", "content": query}],
+                   {}
+               )
+
+               # 获取完整响应
+               full_response, last_meta = stream_out(
+                   stream_generator,
+                   model_name=model_name,
+                   title=self.printer.get_message_from_key_with_format("quick_filter_title", model_name=model_name)
+               )
+               # 解析结果
+               file_number_list = to_model(full_response, FileNumberList)
+
+               # 打印 token 统计信息
+               self.printer.print_in_terminal(
+                   "quick_filter_stats",
+                   style="blue",
+                   elapsed_time=f"{end_time - start_time:.2f}",
+                   input_tokens=last_meta.input_tokens_count,
+                   output_tokens=last_meta.generated_tokens_count
+               )
+
            except Exception as e:
-               logger.error(f"Quick filter failed, error: {str(e)} fallback to normal filter")
+               self.printer.print_error(self.printer.get_message_from_key_with_format("quick_filter_failed", error=str(e)))
                return final_files

            if file_number_list:
                for file_number in file_number_list.file_list:
                    final_files[get_file_path(index_items[file_number].module_name)] = TargetFile(
                        file_path=index_items[file_number].module_name,
-                       reason="Quick Filter"
+                       reason=self.printer.get_message_from_key("quick_filter_reason")
                    )
            end_time = time.monotonic()
            self.stats["timings"]["quick_filter"] = end_time - start_time
autocoder/index/index.py CHANGED
@@ -195,7 +195,7 @@ class IndexManager:
            return True
        return False

-   def build_index_for_single_source(self, source: SourceCode):
+   def build_index_for_single_source(self, source: SourceCode):
        file_path = source.module_name
        if not os.path.exists(file_path):
            return None
@@ -205,6 +205,10 @@ class IndexManager:

        md5 = hashlib.md5(source.source_code.encode("utf-8")).hexdigest()

+       model_name = getattr(self.index_llm, 'default_model_name', None)
+       if not model_name:
+           model_name = "unknown(without default model name)"
+
        try:
            start_time = time.monotonic()
            source_code = source.source_code
@@ -230,13 +234,14 @@ class IndexManager:
                symbols = self.get_all_file_symbols.with_llm(
                    self.index_llm).run(source.module_name, source_code)
                time.sleep(self.anti_quota_limit)
-
+
            self.printer.print_in_terminal(
                "index_update_success",
                style="green",
                file_path=file_path,
                md5=md5,
-               duration=time.monotonic() - start_time
+               duration=time.monotonic() - start_time,
+               model_name=model_name
            )

        except Exception as e:
@@ -246,7 +251,8 @@ class IndexManager:
                "index_build_error",
                style="red",
                file_path=file_path,
-               error=str(e)
+               error=str(e),
+               model_name=model_name
            )
            return None

@@ -464,7 +470,7 @@ class IndexManager:
            {file.file_path: file for file in all_results}.values())
        return FileList(file_list=all_results)

-   def _query_index_with_thread(self, query, func):
+   def _query_index_with_thread(self, query, func):
        all_results = []
        lock = threading.Lock()
        completed_threads = 0
autocoder/models.py CHANGED
@@ -113,6 +113,14 @@ def get_model_by_name(name: str) -> Dict:
        raise Exception(get_message_with_format("model_not_found", model_name=name))
    return v[0]

+
+def check_model_exists(name: str) -> bool:
+    """
+    检查模型是否存在
+    """
+    models = load_models()
+    return any(m["name"] == name.strip() for m in models)
+
 def update_model_with_api_key(name: str, api_key: str) -> Dict:
    """
    根据模型名称查找并更新模型的 api_key_path。
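
`check_model_exists` gives callers a non-raising probe to pair with the raising `get_model_by_name`, which is how the new lite-mode branch decides whether to wire up the optional `deepseek_r1_chat` sub-clients. A sketch of that probe-then-fetch pattern; the registry contents here are illustrative stand-ins for what the real `load_models()` returns.

```python
from typing import Any, Dict, List

def load_models() -> List[Dict[str, Any]]:
    # Hypothetical registry contents; the real load_models() reads
    # the user's configured model list.
    return [{"name": "deepseek_r1_chat", "api_key": "sk-..."}]

def check_model_exists(name: str) -> bool:
    # Same logic the diff adds: a non-raising membership probe.
    return any(m["name"] == name.strip() for m in load_models())

def get_model_by_name(name: str) -> Dict[str, Any]:
    hits = [m for m in load_models() if m["name"] == name.strip()]
    if not hits:
        raise Exception(f"model not found: {name}")
    return hits[0]

if __name__ == "__main__":
    # Probe first, fetch only when present — mirrors the lite-mode branch.
    if check_model_exists("deepseek_r1_chat"):
        print(get_model_by_name("deepseek_r1_chat")["api_key"])
```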
autocoder/tsproject/__init__.py CHANGED
@@ -1,5 +1,5 @@
 from autocoder.common import SourceCode, AutoCoderArgs
-from autocoder import common as FileUtils
+from autocoder import common as CommonUtils
 from autocoder.utils.rest import HttpDoc
 import os
 from typing import Optional, Generator, List, Dict, Any
@@ -164,7 +164,7 @@ class TSProject:
            logger.warning(f"Failed to read file: {file_path}. Error: {str(e)}")
            return None

-       if not FileUtils.has_sufficient_content(source_code, min_line_count=1):
+       if not CommonUtils.has_sufficient_content(source_code, min_line_count=1):
            return None

        return SourceCode(module_name=module_name, source_code=source_code)
autocoder/utils/auto_coder_utils/chat_stream_out.py CHANGED
@@ -9,6 +9,7 @@ from typing import Generator, List, Dict, Any, Optional, Tuple, Literal
 from autocoder.utils.request_queue import RequestValue, RequestOption, StreamValue
 from autocoder.utils.request_queue import request_queue
 import time
+from byzerllm.utils.types import SingleOutputMeta

 MAX_HISTORY_LINES = 40 # 最大保留历史行数

@@ -141,8 +142,10 @@ def multi_stream_out(
 def stream_out(
    stream_generator: Generator[Tuple[str, Dict[str, Any]], None, None],
    request_id: Optional[str] = None,
-   console: Optional[Console] = None
-) -> Tuple[str, Optional[Dict[str, Any]]]:
+   console: Optional[Console] = None,
+   model_name: Optional[str] = None,
+   title: Optional[str] = None
+) -> Tuple[str, Optional[SingleOutputMeta]]:
    """
    处理流式输出事件并在终端中展示

@@ -150,7 +153,8 @@ def stream_out(
        stream_generator: 生成流式输出的生成器
        request_id: 请求ID,用于更新请求队列
        console: Rich Console对象
-
+       model_name: 模型名称
+       title: 面板标题,如果没有提供则使用默认值
    Returns:
        Tuple[str, Dict[str, Any]]: 返回完整的响应内容和最后的元数据
    """
@@ -161,10 +165,10 @@ def stream_out(
    current_line = "" # 当前行
    assistant_response = ""
    last_meta = None
-
-   try:
+   panel_title = title if title is not None else f"Response[ {model_name} ]"
+   try:
        with Live(
-           Panel("", title="Response", border_style="green"),
+           Panel("", title=panel_title, border_style="green"),
            refresh_per_second=4,
            console=console
        ) as live:
@@ -209,7 +213,7 @@ def stream_out(
                    live.update(
                        Panel(
                            Markdown(display_content),
-                           title="Response",
+                           title=panel_title,
                            border_style="green",
                            height=min(50, live.console.height - 4)
                        )
@@ -223,7 +227,7 @@ def stream_out(
            live.update(
                Panel(
                    Markdown(assistant_response),
-                   title="Final Response",
+                   title=f"Final {panel_title}",
                    border_style="blue"
                )
            )
@@ -231,7 +235,7 @@ def stream_out(
    except Exception as e:
        console.print(Panel(
            f"Error: {str(e)}",
-           title="Error",
+           title=f"Error[ {panel_title} ]",
            border_style="red"
        ))
        # import traceback
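
Callers of `stream_out` can now brand the Rich panel: passing `model_name` yields the default `Response[ <model> ]` title, while an explicit `title` overrides it, and the second return value is typed as `SingleOutputMeta` rather than a bare dict. A tiny sketch of just the title-selection rule the diff adds; note that omitting both arguments produces the literal `Response[ None ]`, so callers should supply at least a model name.

```python
from typing import Optional

def panel_title(model_name: Optional[str], title: Optional[str]) -> str:
    # Mirrors the new selection in stream_out(): an explicit title wins,
    # otherwise the panel is branded with the model name.
    return title if title is not None else f"Response[ {model_name} ]"

if __name__ == "__main__":
    print(panel_title("deepseek_r1_chat", None))       # Response[ deepseek_r1_chat ]
    print(panel_title("deepseek_r1_chat", "MyTitle"))  # MyTitle
    print(panel_title(None, None))                     # Response[ None ]
```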
autocoder/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.245"
+__version__ = "0.1.247"