auto-coder 0.1.245__py3-none-any.whl → 0.1.246__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of auto-coder might be problematic.
- {auto_coder-0.1.245.dist-info → auto_coder-0.1.246.dist-info}/METADATA +1 -1
- {auto_coder-0.1.245.dist-info → auto_coder-0.1.246.dist-info}/RECORD +16 -16
- autocoder/auto_coder.py +59 -75
- autocoder/chat_auto_coder.py +4 -3
- autocoder/common/auto_coder_lang.py +6 -6
- autocoder/index/entry.py +9 -3
- autocoder/index/filter/normal_filter.py +9 -8
- autocoder/index/index.py +11 -5
- autocoder/models.py +8 -0
- autocoder/tsproject/__init__.py +2 -2
- autocoder/utils/auto_coder_utils/chat_stream_out.py +6 -5
- autocoder/version.py +1 -1
- {auto_coder-0.1.245.dist-info → auto_coder-0.1.246.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.245.dist-info → auto_coder-0.1.246.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.245.dist-info → auto_coder-0.1.246.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.245.dist-info → auto_coder-0.1.246.dist-info}/top_level.txt +0 -0
{auto_coder-0.1.245.dist-info → auto_coder-0.1.246.dist-info}/RECORD
CHANGED

@@ -1,17 +1,17 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/auto_coder.py,sha256=
+autocoder/auto_coder.py,sha256=VoSRpvXRLIL3BKGudqFVeO8qolFAjKKWbUbbNQUikDE,63074
 autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
 autocoder/auto_coder_rag.py,sha256=illKgzP2bv-Tq50ujsofJnOHdI4pzr0ALtfR8NHHWdQ,22351
 autocoder/auto_coder_rag_client_mcp.py,sha256=WV7j5JUiQge0x4-B7Hp5-pSAFXLbvLpzQMcCovbauIM,6276
 autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
 autocoder/auto_coder_server.py,sha256=XU9b4SBH7zjPPXaTWWHV4_zJm-XYa6njuLQaplYJH_c,20290
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
-autocoder/chat_auto_coder.py,sha256=
+autocoder/chat_auto_coder.py,sha256=wdFPshtAWiFrDkCKHSxKKDhcnHQnd9mW0vQZcaRQlC4,105965
 autocoder/chat_auto_coder_lang.py,sha256=gbpjfMd1wYiIrOlLDc-G7eI497mMwjM_ud9GvO-wo9k,15261
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
-autocoder/models.py,sha256=
-autocoder/version.py,sha256=
+autocoder/models.py,sha256=_9Kc8oS_tnnqKzRGnybgOfn1NOey7OXZ8y9qhBmgiB4,5517
+autocoder/version.py,sha256=5HxDu_oJkWkJDjnFdCKsIoI4evQ0gJJY_wccUUoUJTU,23
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -29,7 +29,7 @@ autocoder/common/__init__.py,sha256=2isE_u4VgfogwmcUCnFcussVFlzeNOLHDMFm5z_axbU,
 autocoder/common/anything2images.py,sha256=0ILBbWzY02M-CiWB-vzuomb_J1hVdxRcenAfIrAXq9M,25283
 autocoder/common/anything2img.py,sha256=4TREa-sOA-iargieUy7MpyCYVUE-9Mmq0wJtwomPqnE,7662
 autocoder/common/audio.py,sha256=Kn9nWKQddWnUrAz0a_ZUgjcu4VUU_IcZBigT7n3N3qc,7439
-autocoder/common/auto_coder_lang.py,sha256=
+autocoder/common/auto_coder_lang.py,sha256=Slwyy0LNqsgHbyWIImYz8l4ke9cT9GagZA4HtbTXIX0,14065
 autocoder/common/buildin_tokenizer.py,sha256=L7d5t39ZFvUd6EoMPXUhYK1toD0FHlRH1jtjKRGokWU,1236
 autocoder/common/chunk_validation.py,sha256=BrR_ZWavW8IANuueEE7hS8NFAwEvm8TX34WnPx_1hs8,3030
 autocoder/common/cleaner.py,sha256=NU72i8C6o9m0vXExab7nao5bstBUsfJFcj11cXa9l4U,1089
@@ -78,13 +78,13 @@ autocoder/dispacher/actions/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
 autocoder/dispacher/actions/plugins/action_regex_project.py,sha256=ht_HWzZt84IEogoFMggnXI6aFFerrsuksVflAkcodfU,5545
 autocoder/dispacher/actions/plugins/action_translate.py,sha256=nVAtRSQpdGNmZxg1R_9zXG3AuTv3CHf2v7ODgj8u65c,7727
 autocoder/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/index/entry.py,sha256=
+autocoder/index/entry.py,sha256=hXSOi-jPgRBKQB55eqWkT95vxvWrbVHDuIMtDkqjNNw,12232
 autocoder/index/for_command.py,sha256=BFvljE4t6VaMBGboZAuhUCzVK0EitCy_n5D_7FEnihw,3204
-autocoder/index/index.py,sha256=
+autocoder/index/index.py,sha256=xwh22nY0TtEJMJwYjOUd6xdRZozYXBt47YSwheZSP-4,20679
 autocoder/index/symbols_utils.py,sha256=CjcjUVajmJZB75Ty3a7kMv1BZphrm-tIBAdOJv6uo-0,2037
 autocoder/index/types.py,sha256=a2s_KV5FJlq7jqA2ELSo9E1sjuLwDB-JJYMhSpzBAhU,596
 autocoder/index/filter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/index/filter/normal_filter.py,sha256=
+autocoder/index/filter/normal_filter.py,sha256=APu34iSvWhtlLtWgkj8N3Vo4oW1TegtZQq2bwDX_cs4,8031
 autocoder/index/filter/quick_filter.py,sha256=Omvsz9O1xQEH4xP-wNuCZhxn69P7Y59SiLPUIDuGFiA,3851
 autocoder/pyproject/__init__.py,sha256=dQ2_7YZ7guybT9BhfxSGn43eLQJGQN2zgeKa6--JlaQ,14403
 autocoder/rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -121,7 +121,7 @@ autocoder/rag/stream_event/types.py,sha256=rtLwOE8rShmi1dJdxyBpAV5ZjLBGG9vptMiSz
 autocoder/regex_project/__init__.py,sha256=EBZeCL5ORyD_9_5u_UuG4s7XtpXOu0y1sWDmxWFtufE,6781
 autocoder/regexproject/__init__.py,sha256=cEr-ZOaQjLD5sx7T7F2DhD5ips03HcJ02rded9EpSXc,9693
 autocoder/suffixproject/__init__.py,sha256=VcXjUbGf3uQrpoqVCItDvGG9DoeHJ_qEmghKwrVNw9w,11058
-autocoder/tsproject/__init__.py,sha256=
+autocoder/tsproject/__init__.py,sha256=boNuRCHi94xI_y4tvL5LKgSZ4gYxcPqUUQTw9MU_STI,11751
 autocoder/utils/__init__.py,sha256=KtcGElFNBgZPF7dEL8zF9JpXkCAjoyDrzaREJBhJrcs,994
 autocoder/utils/_markitdown.py,sha256=RU88qn4eZfYIy0GDrPxlI8oYXIypbi63VRJjdlnE0VU,47431
 autocoder/utils/coder.py,sha256=rK8e0svQBe0NOP26dIGToUXgha_hUDgxlWoC_p_r7oc,5698
@@ -139,11 +139,11 @@ autocoder/utils/rest.py,sha256=hLBhr78y-WVnV0oQf9Rxc22EwqF78KINkScvYa1MuYA,6435
 autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
 autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=
+autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=64WfP5rFKYwI9OXrpRTQjUQa9n6ULTscubKziot-rAU,9218
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.245.dist-info/LICENSE,sha256=
-auto_coder-0.1.245.dist-info/METADATA,sha256=
-auto_coder-0.1.245.dist-info/WHEEL,sha256=
-auto_coder-0.1.245.dist-info/entry_points.txt,sha256=
-auto_coder-0.1.245.dist-info/top_level.txt,sha256=
-auto_coder-0.1.245.dist-info/RECORD,,
+auto_coder-0.1.246.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.246.dist-info/METADATA,sha256=zENdZdSfMKFmZWd-N1GIOXfm8xWnBv0nG-s9I3UpZqY,2616
+auto_coder-0.1.246.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.246.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.246.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.246.dist-info/RECORD,,
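The RECORD diff above is the quickest way to see which modules were actually rebuilt: only the entries whose sha256/size pair changed carry new code. Each line follows the wheel RECORD format ("path,sha256=<urlsafe-base64 digest, no padding>,<size in bytes>"), so a hash can be re-derived locally. A short sketch, assuming the 0.1.246 wheel has been unpacked into the current directory:

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    # Rebuild one RECORD line: urlsafe base64 of the SHA-256 digest, padding stripped.
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

# Expected for 0.1.246:
# autocoder/version.py,sha256=5HxDu_oJkWkJDjnFdCKsIoI4evQ0gJJY_wccUUoUJTU,23
print(record_entry("autocoder/version.py"))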
autocoder/auto_coder.py
CHANGED
@@ -300,86 +300,66 @@ def main(input_args: Optional[List[str]] = None):
         llm.setup_sub_client("chat_model", chat_model)

     if args.product_mode == "lite":
-        default_model = args.model
+        default_model = args.model
+        model_info = models_module.get_model_by_name(default_model)
         llm = byzerllm.SimpleByzerLLM(default_model_name=default_model)
-        api_key_dir = os.path.expanduser("~/.auto-coder/keys")
-        api_key_file = os.path.join(api_key_dir, "api.deepseek.com")
-
-        if not os.path.exists(api_key_file):
-            raise Exception(f"API key file not found: {api_key_file}")
-
-        with open(api_key_file, "r") as f:
-            api_key = f.read()
-
         llm.deploy(
             model_path="",
-            pretrained_model_type="
-            udf_name=
+            pretrained_model_type=model_info["model_type"],
+            udf_name=args.model,
             infer_params={
-                "saas.base_url": "
-                "saas.api_key": api_key,
-                "saas.model": "
-                "saas.is_reasoning":
+                "saas.base_url": model_info["base_url"],
+                "saas.api_key": model_info["api_key"],
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
-        )
-            model_path="",
-            pretrained_model_type="saas/openai",
-            udf_name="deepseek_r1_chat",
-            infer_params={
-                "saas.base_url": "https://api.deepseek.com/v1",
-                "saas.api_key": api_key,
-                "saas.model": "deepseek-reasoner",
-                "saas.is_reasoning": True
-            }
-        )
-        llm.setup_sub_client("generate_rerank_model", generate_rerank_llm)
-        llm.setup_sub_client("index_filter_model", index_filter_llm)
+        )
+
+        if models_module.check_model_exists("deepseek_r1_chat"):
+            r1_model_info = models_module.get_model_by_name("deepseek_r1_chat")
+            api_key = r1_model_info["api_key"]
+            chat_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
+            chat_llm.deploy(
+                model_path="",
+                pretrained_model_type="saas/openai",
+                udf_name="deepseek_r1_chat",
+                infer_params={
+                    "saas.base_url": "https://api.deepseek.com/v1",
+                    "saas.api_key": api_key,
+                    "saas.model": "deepseek-reasoner",
+                    "saas.is_reasoning": True
+                }
+            )
+
+            generate_rerank_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
+            generate_rerank_llm.deploy(
+                model_path="",
+                pretrained_model_type="saas/openai",
+                udf_name="deepseek_r1_chat",
+                infer_params={
+                    "saas.base_url": "https://api.deepseek.com/v1",
+                    "saas.api_key": api_key,
+                    "saas.model": "deepseek-reasoner",
+                    "saas.is_reasoning": True
+                }
+            )
+
+            index_filter_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
+            index_filter_llm.deploy(
+                model_path="",
+                pretrained_model_type="saas/openai",
+                udf_name="deepseek_r1_chat",
+                infer_params={
+                    "saas.base_url": "https://api.deepseek.com/v1",
+                    "saas.api_key": api_key,
+                    "saas.model": "deepseek-reasoner",
+                    "saas.is_reasoning": True
+                }
+            )
+
+            llm.setup_sub_client("chat_model", chat_llm)
+            llm.setup_sub_client("generate_rerank_model", generate_rerank_llm)
+            llm.setup_sub_client("index_filter_model", index_filter_llm)

     if args.product_mode == "lite":
         # Set up default models based on configuration
@@ -1367,12 +1347,16 @@ def main(input_args: Optional[List[str]] = None):
                 llm_config={}
             )

+            model_name = getattr(chat_llm, 'default_model_name', None)
+            if not model_name:
+                model_name = "unknown(without default model name)"

             assistant_response, last_meta = stream_out(
                 v,
                 request_id=args.request_id,
-                console=console
+                console=console,
+                model_name=model_name
             )

             # 打印耗时和token统计
autocoder/chat_auto_coder.py
CHANGED
@@ -2706,9 +2706,10 @@ def main():
             memory["mode"] = "normal"

         # 处理 user_input 的空格
-
-
-
+        if user_input:
+            temp_user_input = user_input.lstrip()  # 去掉左侧空格
+            if temp_user_input.startswith('/'):
+                user_input = temp_user_input

         if (
             memory["mode"] == "auto_detect"
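The chat_auto_coder.py change trims leading whitespace only when the trimmed input turns out to be a slash command, so ordinary prose keeps its leading spaces. A standalone illustration of that rule, using normalize_command_input as a hypothetical helper name (in the source the logic is inlined in main()):

def normalize_command_input(user_input: str) -> str:
    """Strip leading spaces only when the input is a slash command."""
    if user_input:
        temp = user_input.lstrip()
        if temp.startswith('/'):
            return temp
    return user_input

assert normalize_command_input("   /chat hello") == "/chat hello"
assert normalize_command_input("   plain text") == "   plain text"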
autocoder/common/auto_coder_lang.py
CHANGED

@@ -14,8 +14,8 @@ MESSAGES = {
     "no_latest_commit": "Unable to get latest commit information",
     "code_review_error": "Code review process error: {{error}}",
     "index_file_too_large": "⚠️ File {{ file_path }} is too large ({{ file_size }} > {{ max_length }}), splitting into chunks...",
-    "index_update_success": "✅ Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s",
-    "index_build_error": "❌ Error building index for {{ file_path }}: {{ error }}",
+    "index_update_success": "✅ {{ model_name }} Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s",
+    "index_build_error": "❌ {{ model_name }} Error building index for {{ file_path }}: {{ error }}",
     "index_build_summary": "📊 Total Files: {{ total_files }}, Need to Build Index: {{ num_files }}",
     "building_index_progress": "⏳ Building Index: {{ counter }}/{{ num_files }}...",
     "index_source_dir_mismatch": "⚠️ Source directory mismatch (file_path: {{ file_path }}, source_dir: {{ source_dir }})",
@@ -54,7 +54,7 @@ MESSAGES = {
     "code_merge_start": "Auto merge the code...",
     "code_execution_warning": "Content(send to model) is {{ content_length }} tokens (you may collect too much files), which is larger than the maximum input length {{ max_length }}",
     "quick_filter_start": "Starting filter context(quick_filter)...",
-    "normal_filter_start": "Starting filter context(normal_filter)...",
+    "normal_filter_start": "{{ model_name }} Starting filter context(normal_filter)...",
     "pylint_check_failed": "⚠️ Pylint check failed: {{ error_message }}",
     "pylint_error": "❌ Error running pylint: {{ error_message }}",
     "unmerged_blocks_warning": "⚠️ Found {{ num_blocks }} unmerged blocks, the changes will not be applied. Please review them manually then try again.",
@@ -93,8 +93,8 @@ MESSAGES = {
     "no_latest_commit": "无法获取最新的提交信息",
     "code_review_error": "代码审查过程出错: {{error}}",
     "index_file_too_large": "⚠️ 文件 {{ file_path }} 过大 ({{ file_size }} > {{ max_length }}), 正在分块处理...",
-    "index_update_success": "✅ 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} 秒",
-    "index_build_error": "❌ 构建 {{ file_path }} 索引时出错: {{ error }}",
+    "index_update_success": "✅ {{ model_name }} 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} 秒",
+    "index_build_error": "❌ {{ model_name }} 构建 {{ file_path }} 索引时出错: {{ error }}",
     "index_build_summary": "📊 总文件数: {{ total_files }}, 需要构建索引: {{ num_files }}",
     "building_index_progress": "⏳ 正在构建索引: {{ counter }}/{{ num_files }}...",
     "index_source_dir_mismatch": "⚠️ 源目录不匹配 (文件路径: {{ file_path }}, 源目录: {{ source_dir }})",
@@ -133,7 +133,7 @@ MESSAGES = {
     "code_merge_start": "正在自动合并代码...",
     "code_execution_warning": "发送给模型的内容长度为 {{ content_length }} tokens(您可能收集了太多文件),超过了最大输入长度 {{ max_length }}",
     "quick_filter_start": "开始查找上下文(quick_filter)...",
-    "normal_filter_start": "开始查找上下文(normal_filter)...",
+    "normal_filter_start": "{{ model_name }} 开始查找上下文(normal_filter)...",
     "pylint_check_failed": "⚠️ Pylint 检查失败: {{ error_message }}",
     "pylint_error": "❌ 运行 Pylint 时出错: {{ error_message }}",
     "begin_index_source_code": "🚀 开始为 {{ source_dir }} 中的源代码建立索引",
autocoder/index/entry.py
CHANGED
@@ -101,13 +101,19 @@ def build_index_and_filter_files(
             )
         )

-    if not args.skip_filter_index and args.index_filter_model:
-
+    if not args.skip_filter_index and args.index_filter_model:
+        model_name = getattr(index_manager.index_filter_llm, 'default_model_name', None)
+        if not model_name:
+            model_name = "unknown(without default model name)"
+        printer.print_in_terminal("quick_filter_start", style="blue", model_name=model_name)
         quick_filter = QuickFilter(index_manager,stats,sources)
         final_files = quick_filter.filter(index_manager.read_index(),args.query)

     if not args.skip_filter_index and not args.index_filter_model:
-
+        model_name = getattr(index_manager.llm, 'default_model_name', None)
+        if not model_name:
+            model_name = "unknown(without default model name)"
+        printer.print_in_terminal("normal_filter_start", style="blue",model_name=model_name)
         normal_filter = NormalFilter(index_manager,stats,sources)
         final_files = normal_filter.filter(index_manager.read_index(),args.query)

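Both branches above resolve a human-readable model name before announcing which filter is about to run, and the same fallback reappears in index.py below. A standalone sketch of that pattern, with resolve_model_name as a hypothetical helper name (the real code inlines the getattr call):

def resolve_model_name(llm_client) -> str:
    """Return the client's default model name, or a readable placeholder."""
    model_name = getattr(llm_client, "default_model_name", None)
    if not model_name:
        model_name = "unknown(without default model name)"
    return model_name

class _FakeClient:
    default_model_name = "deepseek_r1_chat"

assert resolve_model_name(_FakeClient()) == "deepseek_r1_chat"
assert resolve_model_name(object()) == "unknown(without default model name)"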
autocoder/index/filter/normal_filter.py
CHANGED

@@ -60,7 +60,7 @@ class NormalFilter():
             phase_end = time.monotonic()
             self.stats["timings"]["normal_filter"]["level1_filter"] = phase_end - phase_start

-            # Phase 4: Level 2 filtering - Related files
+            # Phase 4: Level 2 filtering - Related files
             if target_files is not None and self.args.index_filter_level >= 2:
                 logger.info(
                     "Phase 4: Performing Level 2 filtering (related files)...")
@@ -84,13 +84,14 @@ class NormalFilter():
             phase_end = time.monotonic()
             self.stats["timings"]["normal_filter"]["level2_filter"] = phase_end - phase_start

-        if not final_files:
-
-
-
-
-
+        # if not final_files:
+        #     logger.warning("No related files found, using all files")
+        #     for source in self.sources:
+        #         final_files[get_file_path(source.module_name)] = TargetFile(
+        #             file_path=source.module_name,
+        #             reason="No related files found, use all files",
+        #         )
+

         # Phase 5: Relevance verification
         logger.info("Phase 5: Performing relevance verification...")
autocoder/index/index.py
CHANGED
@@ -195,7 +195,7 @@ class IndexManager:
                 return True
         return False

-    def build_index_for_single_source(self, source: SourceCode):
+    def build_index_for_single_source(self, source: SourceCode):
        file_path = source.module_name
        if not os.path.exists(file_path):
            return None
@@ -205,6 +205,10 @@ class IndexManager:

        md5 = hashlib.md5(source.source_code.encode("utf-8")).hexdigest()

+        model_name = getattr(self.index_llm, 'default_model_name', None)
+        if not model_name:
+            model_name = "unknown(without default model name)"
+
        try:
            start_time = time.monotonic()
            source_code = source.source_code
@@ -230,13 +234,14 @@ class IndexManager:
                symbols = self.get_all_file_symbols.with_llm(
                    self.index_llm).run(source.module_name, source_code)
                time.sleep(self.anti_quota_limit)
-
+
            self.printer.print_in_terminal(
                "index_update_success",
                style="green",
                file_path=file_path,
                md5=md5,
-                duration=time.monotonic() - start_time
+                duration=time.monotonic() - start_time,
+                model_name=model_name
            )

        except Exception as e:
@@ -246,7 +251,8 @@ class IndexManager:
                "index_build_error",
                style="red",
                file_path=file_path,
-                error=str(e)
+                error=str(e),
+                model_name=model_name
            )
            return None

@@ -464,7 +470,7 @@ class IndexManager:
                {file.file_path: file for file in all_results}.values())
            return FileList(file_list=all_results)

-    def _query_index_with_thread(self, query, func):
+    def _query_index_with_thread(self, query, func):
        all_results = []
        lock = threading.Lock()
        completed_threads = 0
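build_index_for_single_source keys each index entry by an MD5 of the file content and reports the elapsed time in the index_update_success message. A minimal standalone illustration of that fingerprint and timing, using only the calls visible in the diff:

import hashlib
import time

def fingerprint(source_code: str) -> str:
    """MD5 of the file content, used to detect whether an index entry is stale."""
    return hashlib.md5(source_code.encode("utf-8")).hexdigest()

start_time = time.monotonic()
md5 = fingerprint("def hello():\n    return 'world'\n")
duration = time.monotonic() - start_time
print(f"md5={md5}, duration={duration:.4f}s")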
autocoder/models.py
CHANGED
@@ -113,6 +113,14 @@ def get_model_by_name(name: str) -> Dict:
         raise Exception(get_message_with_format("model_not_found", model_name=name))
     return v[0]

+
+def check_model_exists(name: str) -> bool:
+    """
+    检查模型是否存在
+    """
+    models = load_models()
+    return any(m["name"] == name.strip() for m in models)
+

 def update_model_with_api_key(name: str, api_key: str) -> Dict:
     """
     根据模型名称查找并更新模型的 api_key_path。
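check_model_exists is the guard that auto_coder.py now uses before deploying the optional deepseek_r1_chat sub-clients. A hedged usage sketch; load_models and the per-model dict keys are taken from the diff, while the import alias is assumed:

from autocoder import models as models_module

# Only configure the optional reasoning model when it is registered.
if models_module.check_model_exists("deepseek_r1_chat"):
    info = models_module.get_model_by_name("deepseek_r1_chat")
    print(f"deepseek_r1_chat -> {info['base_url']} ({info['model_name']})")
else:
    print("deepseek_r1_chat is not configured; skipping sub-client setup")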
autocoder/tsproject/__init__.py
CHANGED
@@ -1,5 +1,5 @@
 from autocoder.common import SourceCode, AutoCoderArgs
-from autocoder import common as
+from autocoder import common as CommonUtils
 from autocoder.utils.rest import HttpDoc
 import os
 from typing import Optional, Generator, List, Dict, Any
@@ -164,7 +164,7 @@ class TSProject:
             logger.warning(f"Failed to read file: {file_path}. Error: {str(e)}")
             return None

-        if not
+        if not CommonUtils.has_sufficient_content(source_code, min_line_count=1):
             return None

         return SourceCode(module_name=module_name, source_code=source_code)
autocoder/utils/auto_coder_utils/chat_stream_out.py
CHANGED

@@ -141,7 +141,8 @@ def multi_stream_out(
 def stream_out(
     stream_generator: Generator[Tuple[str, Dict[str, Any]], None, None],
     request_id: Optional[str] = None,
-    console: Optional[Console] = None
+    console: Optional[Console] = None,
+    model_name: Optional[str] = None
 ) -> Tuple[str, Optional[Dict[str, Any]]]:
     """
     处理流式输出事件并在终端中展示
@@ -164,7 +165,7 @@ def stream_out(

     try:
         with Live(
-            Panel("", title="Response", border_style="green"),
+            Panel("", title=f"Response[ {model_name} ]", border_style="green"),
             refresh_per_second=4,
             console=console
         ) as live:
@@ -209,7 +210,7 @@ def stream_out(
             live.update(
                 Panel(
                     Markdown(display_content),
-                    title="Response",
+                    title=f"Response[ {model_name} ]",
                     border_style="green",
                     height=min(50, live.console.height - 4)
                 )
@@ -223,7 +224,7 @@ def stream_out(
             live.update(
                 Panel(
                     Markdown(assistant_response),
-                    title="Final Response",
+                    title=f"Final Response[ {model_name} ]",
                     border_style="blue"
                 )
             )
@@ -231,7 +232,7 @@ def stream_out(
     except Exception as e:
         console.print(Panel(
             f"Error: {str(e)}",
-            title="Error",
+            title=f"Error[ {model_name} ]",
             border_style="red"
         ))
         # import traceback
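The Rich panel titles now carry the model name through the normal, final, and error paths. A minimal standalone example of the new title format (the streaming loop around Live is omitted):

from rich.console import Console
from rich.markdown import Markdown
from rich.panel import Panel

console = Console()
model_name = "deepseek_r1_chat"
assistant_response = "Here is the **final** answer."

# Mirrors the "Final Response[ <model> ]" panel produced by stream_out.
console.print(
    Panel(
        Markdown(assistant_response),
        title=f"Final Response[ {model_name} ]",
        border_style="blue",
    )
)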
autocoder/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.1.245"
+__version__ = "0.1.246"
{auto_coder-0.1.245.dist-info → auto_coder-0.1.246.dist-info}/LICENSE: File without changes
{auto_coder-0.1.245.dist-info → auto_coder-0.1.246.dist-info}/WHEEL: File without changes
{auto_coder-0.1.245.dist-info → auto_coder-0.1.246.dist-info}/entry_points.txt: File without changes
{auto_coder-0.1.245.dist-info → auto_coder-0.1.246.dist-info}/top_level.txt: File without changes