auto-coder 0.1.240-py3-none-any.whl → 0.1.243-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/METADATA +2 -2
- {auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/RECORD +18 -18
- autocoder/auto_coder.py +18 -3
- autocoder/chat_auto_coder.py +54 -7
- autocoder/chat_auto_coder_lang.py +8 -0
- autocoder/common/__init__.py +1 -0
- autocoder/common/auto_coder_lang.py +22 -9
- autocoder/common/code_auto_merge_editblock.py +2 -2
- autocoder/common/code_modification_ranker.py +4 -6
- autocoder/common/printer.py +3 -2
- autocoder/common/utils_code_auto_generate.py +7 -4
- autocoder/index/for_command.py +3 -2
- autocoder/utils/auto_coder_utils/chat_stream_out.py +263 -2
- autocoder/version.py +1 -1
- {auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/top_level.txt +0 -0
{auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/METADATA CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.240
+Version: 0.1.243
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter-client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas] >=0.1.
+Requires-Dist: byzerllm[saas] >=0.1.159
 Requires-Dist: patch
 Requires-Dist: diff-match-patch
 Requires-Dist: GitPython
```
{auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/RECORD CHANGED

```diff
@@ -1,17 +1,17 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/auto_coder.py,sha256=
+autocoder/auto_coder.py,sha256=iQeyrg5L87IgBLFJLZLIJetHFeMuT2uslRlzo_Vjq9s,61829
 autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
 autocoder/auto_coder_rag.py,sha256=illKgzP2bv-Tq50ujsofJnOHdI4pzr0ALtfR8NHHWdQ,22351
 autocoder/auto_coder_rag_client_mcp.py,sha256=WV7j5JUiQge0x4-B7Hp5-pSAFXLbvLpzQMcCovbauIM,6276
 autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
 autocoder/auto_coder_server.py,sha256=XU9b4SBH7zjPPXaTWWHV4_zJm-XYa6njuLQaplYJH_c,20290
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
-autocoder/chat_auto_coder.py,sha256=
-autocoder/chat_auto_coder_lang.py,sha256=
+autocoder/chat_auto_coder.py,sha256=s9uMjDQQawXEsq171GO7SVMO4fDjWd4xWT0KYO7nRp4,105295
+autocoder/chat_auto_coder_lang.py,sha256=V-VIieyKF5cwlK448B1V2LUbTdrU03tfgDrOk2aBvFk,14891
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
 autocoder/models.py,sha256=FlBrF6HhGao_RiCSgYhCmP7vs0KlG4hI_BI6dyZiL9s,5292
-autocoder/version.py,sha256=
+autocoder/version.py,sha256=u0hWeuFclX3Z9nFe5oFsCdX854VeUHOs69Ggv1pvBvk,23
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -24,11 +24,11 @@ autocoder/agent/project_reader.py,sha256=tWLaPoLw1gI6kO_NzivQj28KbobU2ceOLuppHMb
 autocoder/chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/common/JupyterClient.py,sha256=O-wi6pXeAEYhAY24kDa0BINrLYvKS6rKyWe98pDClS0,2816
 autocoder/common/ShellClient.py,sha256=fM1q8t_XMSbLBl2zkCNC2J9xuyKN3eXzGm6hHhqL2WY,2286
-autocoder/common/__init__.py,sha256=
+autocoder/common/__init__.py,sha256=2isE_u4VgfogwmcUCnFcussVFlzeNOLHDMFm5z_axbU,11774
 autocoder/common/anything2images.py,sha256=0ILBbWzY02M-CiWB-vzuomb_J1hVdxRcenAfIrAXq9M,25283
 autocoder/common/anything2img.py,sha256=4TREa-sOA-iargieUy7MpyCYVUE-9Mmq0wJtwomPqnE,7662
 autocoder/common/audio.py,sha256=Kn9nWKQddWnUrAz0a_ZUgjcu4VUU_IcZBigT7n3N3qc,7439
-autocoder/common/auto_coder_lang.py,sha256=
+autocoder/common/auto_coder_lang.py,sha256=9FBNhcl6Do4ICh-klevYsCTsDuy5kD99r8EE5Gs1QoM,12592
 autocoder/common/buildin_tokenizer.py,sha256=L7d5t39ZFvUd6EoMPXUhYK1toD0FHlRH1jtjKRGokWU,1236
 autocoder/common/chunk_validation.py,sha256=BrR_ZWavW8IANuueEE7hS8NFAwEvm8TX34WnPx_1hs8,3030
 autocoder/common/cleaner.py,sha256=NU72i8C6o9m0vXExab7nao5bstBUsfJFcj11cXa9l4U,1089
@@ -39,9 +39,9 @@ autocoder/common/code_auto_generate_editblock.py,sha256=QdUHUkGaervvQNCY8T2vQ_tf
 autocoder/common/code_auto_generate_strict_diff.py,sha256=uteWDEHfIbrnVgwKgqC7qwrIeW0enJCXcHzZGa48yY8,14774
 autocoder/common/code_auto_merge.py,sha256=8dtnz61l0B5gNbQmx26TZ4_jD825dsnnWtAFD_zs6es,7335
 autocoder/common/code_auto_merge_diff.py,sha256=yocfe8s3Pz6hTGDUl9wRIewY3NcTize_gEla64lsGT0,15331
-autocoder/common/code_auto_merge_editblock.py,sha256=
+autocoder/common/code_auto_merge_editblock.py,sha256=sp7C0fZJMVcNgI8uWy43CKDk7gGXFTkMB9kbP2VdY8k,17485
 autocoder/common/code_auto_merge_strict_diff.py,sha256=9rm0NJ_n6M3LohEX7xl1Jym0xmm8UEYqj_ZTSO3oSlM,9519
-autocoder/common/code_modification_ranker.py,sha256=
+autocoder/common/code_modification_ranker.py,sha256=l0OAR7ad0hTV3xdfn7rO0KqnY5Y5qSG4BmWcClZ9RUQ,6104
 autocoder/common/command_completer.py,sha256=SSeb8MDH0JPvfdyW-S2uaHnui4VBDfSQvQPLbv3ORPA,9314
 autocoder/common/command_generator.py,sha256=v4LmU7sO-P7jEZIXCWHUC6P-vT7AvBi_x_PTwCqBAE8,1323
 autocoder/common/command_templates.py,sha256=mnB3n8i0yjH1mqzyClEg8Wpr9VbZV44kxky66Zu6OJY,8557
@@ -55,7 +55,7 @@ autocoder/common/mcp_hub.py,sha256=2ZyJv3Aiv4Y97UHut49oYhIFcu7ICR-mptDEBSgT3uE,1
 autocoder/common/mcp_server.py,sha256=QCFa-15kx7rbNsinwdGFFX2y47pww0fVdI-ldKFSSWI,12267
 autocoder/common/mcp_tools.py,sha256=KsLvRrB6pvmebqd-lDaSH6IBJR0AIxWRE-dtCEG_w9k,12485
 autocoder/common/memory_manager.py,sha256=2ZjYG7BPyvbYalZBF6AM_G5e10Qkw_zrqtD4Zd7GSsQ,3663
-autocoder/common/printer.py,sha256=
+autocoder/common/printer.py,sha256=TIqgGOq5YdWqH4_776QHwHmwfVpubZ9zzUq8rstNxuM,1911
 autocoder/common/recall_validation.py,sha256=Avt9Q9dX3kG6Pf2zsdlOHmsjd-OeSj7U1PFBDp_Cve0,1700
 autocoder/common/screenshots.py,sha256=_gA-z1HxGjPShBrtgkdideq58MG6rqFB2qMUJKjrycs,3769
 autocoder/common/search.py,sha256=245iPFgWhMldoUK3CqCP89ltaxZiNPK73evoG6Fp1h8,16518
@@ -63,7 +63,7 @@ autocoder/common/search_replace.py,sha256=GphFkc57Hb673CAwmbiocqTbw8vrV7TrZxtOhD
 autocoder/common/sys_prompt.py,sha256=JlexfjZt554faqbgkCmzOJqYUzDHfbnxly5ugFfHfEE,26403
 autocoder/common/text.py,sha256=KGRQq314GHBmY4MWG8ossRoQi1_DTotvhxchpn78c-k,1003
 autocoder/common/types.py,sha256=PXTETrsTvhLE49jqAeUKGySvxBN9pjeyCgRHLDYdd9U,664
-autocoder/common/utils_code_auto_generate.py,sha256=
+autocoder/common/utils_code_auto_generate.py,sha256=kDW5B_2wRLk7hAls2hewliDacV86lrPz8Jan01BvtCw,3573
 autocoder/common/mcp_servers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/common/mcp_servers/mcp_server_perplexity.py,sha256=jz0LkCgZcqKkNdLZ9swNOu9Besoba4JOyHDedoZnWHo,5546
 autocoder/data/tokenizer.json,sha256=QfO_ZCE9qMAS2L0IcaWKH99wRj6PCPEQ3bsQgvUp9mk,4607451
@@ -78,7 +78,7 @@ autocoder/dispacher/actions/plugins/action_regex_project.py,sha256=ht_HWzZt84IEo
 autocoder/dispacher/actions/plugins/action_translate.py,sha256=nVAtRSQpdGNmZxg1R_9zXG3AuTv3CHf2v7ODgj8u65c,7727
 autocoder/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/index/entry.py,sha256=KJaxqtaKgL27w8-j7OiAqI0anPpmrJSl7PkfeVF2ipE,11713
-autocoder/index/for_command.py,sha256=
+autocoder/index/for_command.py,sha256=BFvljE4t6VaMBGboZAuhUCzVK0EitCy_n5D_7FEnihw,3204
 autocoder/index/index.py,sha256=8AcaELR1FS___7VlNyxPnJsDVQ4wjORbqXvcA6TifCE,20337
 autocoder/index/symbols_utils.py,sha256=CjcjUVajmJZB75Ty3a7kMv1BZphrm-tIBAdOJv6uo-0,2037
 autocoder/index/types.py,sha256=a2s_KV5FJlq7jqA2ELSo9E1sjuLwDB-JJYMhSpzBAhU,596
@@ -138,11 +138,11 @@ autocoder/utils/rest.py,sha256=opE_kBEdNQdxh350M5lUTMk5TViRfpuKP_qWc0B1lks,8861
 autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
 autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=
+autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=fcXusKEUKMu8WY9Y1_JL5aPkC-soKFxQcFAKThrNZoQ,13338
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.240.dist-info/LICENSE,sha256=
-auto_coder-0.1.240.dist-info/METADATA,sha256=
-auto_coder-0.1.240.dist-info/WHEEL,sha256=
-auto_coder-0.1.240.dist-info/entry_points.txt,sha256=
-auto_coder-0.1.240.dist-info/top_level.txt,sha256=
-auto_coder-0.1.240.dist-info/RECORD,,
+auto_coder-0.1.243.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.243.dist-info/METADATA,sha256=zq_UlYzkagreMYmIrWVkCmf43Zr3_jIX1w577MlSXQE,2616
+auto_coder-0.1.243.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.243.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.243.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.243.dist-info/RECORD,,
```
autocoder/auto_coder.py CHANGED

```diff
@@ -14,7 +14,7 @@ from autocoder.utils.queue_communicate import (
 
 import yaml
 import os
-import
+import time
 from byzerllm.utils.client import EventCallbackResult, EventName
 from prompt_toolkit import prompt
 from prompt_toolkit.formatted_text import FormattedText
@@ -1112,7 +1112,7 @@ def main(input_args: Optional[List[str]] = None):
             json.dump(chat_history, f, ensure_ascii=False)
         console.print(
             Panel(
-                "
+                get_message("new_session_started"),
                 title="Session Status",
                 expand=False,
                 border_style="green",
@@ -1310,6 +1310,9 @@ def main(input_args: Optional[List[str]] = None):
             printer = Printer()
             printer.print_in_terminal("memory_save_success")
             return {}
+
+        # Track elapsed time
+        start_time = time.time()
 
         if "rag" in args.action:
             args.enable_rag_search = True
@@ -1335,12 +1338,23 @@ def main(input_args: Optional[List[str]] = None):
             conversations=loaded_conversations,
             llm_config={}
         )
+
+
 
         assistant_response, last_meta = stream_out(
             v,
             request_id=args.request_id,
             console=console
         )
+
+        # Print elapsed time and token stats
+        if last_meta:
+            elapsed_time = time.time() - start_time
+            printer = Printer()
+            printer.print_in_terminal("stream_out_stats",
+                                      elapsed_time=elapsed_time,
+                                      input_tokens=last_meta.input_tokens_count,
+                                      output_tokens=last_meta.generated_tokens_count)
 
         chat_history["ask_conversation"].append(
             {"role": "assistant", "content": assistant_response}
@@ -1361,7 +1375,8 @@ def main(input_args: Optional[List[str]] = None):
         save_to_memory_file(ask_conversation=chat_history["ask_conversation"],
                             query=args.query,
                             response=assistant_response)
-
+        printer = Printer()
+        printer.print_in_terminal("memory_save_success")
         return
 
     else:
```
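The auto_coder.py change records a start time before the chat action runs and, once streaming finishes, reports wall-clock time plus the token counts carried on the final stream metadata. A minimal sketch of that pattern, with a stubbed generator standing in for stream_out's input (fake_stream and the SimpleNamespace metadata are illustrative, not the package's types):

```python
import time
from types import SimpleNamespace

def fake_stream():
    # Stand-in for the (content, metadata) pairs the real stream yields.
    meta = SimpleNamespace(input_tokens_count=120, generated_tokens_count=0)
    for chunk in ["Hello", ", ", "world"]:
        meta.generated_tokens_count += 1
        yield chunk, meta

start_time = time.time()  # captured before the action runs, as in the diff
assistant_response, last_meta = "", None
for content, meta in fake_stream():
    assistant_response += content
    last_meta = meta  # the final metadata carries the cumulative counts

if last_meta:  # stats are only printed when metadata actually arrived
    elapsed_time = time.time() - start_time
    print(f"Elapsed time {elapsed_time:.2f} seconds, "
          f"input tokens: {last_meta.input_tokens_count}, "
          f"output tokens: {last_meta.generated_tokens_count}")
```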
autocoder/chat_auto_coder.py CHANGED

```diff
@@ -1002,6 +1002,51 @@ def load_memory():
     completer.update_current_files(memory["current_files"]["files"])
 
 
+def print_conf(content:Dict[str,Any]):
+    """Display configuration dictionary in a Rich table format with enhanced visual styling.
+
+    Args:
+        conf (Dict[str, Any]): Configuration dictionary to display
+    """
+    console = Console()
+
+    # Create a styled table with rounded borders
+    table = Table(
+        show_header=True,
+        header_style="bold magenta",
+        title=get_message("conf_title"),
+        title_style="bold blue",
+        border_style="blue",
+        show_lines=True
+    )
+
+    # Add columns with explicit width and alignment
+    table.add_column(get_message("conf_key"), style="cyan", justify="right", width=30, no_wrap=False)
+    table.add_column(get_message("conf_value"), style="green", justify="left", width=50, no_wrap=False)
+
+    # Sort keys for consistent display
+    for key in sorted(content.keys()):
+        value = content[key]
+        # Format value based on type
+        if isinstance(value, (dict, list)):
+            formatted_value = Text(json.dumps(value, indent=2), style="yellow")
+        elif isinstance(value, bool):
+            formatted_value = Text(str(value), style="bright_green" if value else "red")
+        elif isinstance(value, (int, float)):
+            formatted_value = Text(str(value), style="bright_cyan")
+        else:
+            formatted_value = Text(str(value), style="green")
+
+        table.add_row(str(key), formatted_value)
+
+    # Add padding and print with a panel
+    console.print(Panel(
+        table,
+        padding=(1, 2),
+        subtitle=f"[italic]{get_message('conf_subtitle')}[/italic]",
+        border_style="blue"
+    ))
+
 def revert():
     last_yaml_file = get_last_yaml_file("actions")
     if last_yaml_file:
@@ -2123,6 +2168,11 @@ def manage_models(params, query: str):
     if "/add" in query:
         subcmd = "/add"
         query = query.replace("/add", "", 1).strip()
+
+    # alias to /add
+    if "/activate" in query:
+        subcmd = "/add"
+        query = query.replace("/activate", "", 1).strip()
 
     if "/remove" in query:
         subcmd = "/remove"
@@ -2141,7 +2191,7 @@ def manage_models(params, query: str):
         )
         table.add_column("Name", style="cyan", width=40, no_wrap=False)
         table.add_column("Model Name", style="magenta", width=30, overflow="fold")
-        table.add_column("
+        table.add_column("Base URL", style="white", width=50, overflow="fold")
         for m in models_data:
             # Check if api_key_path exists and file exists
             api_key_path = m.get("api_key_path", "")
@@ -2154,7 +2204,7 @@ def manage_models(params, query: str):
             table.add_row(
                 name,
                 m.get("model_name", ""),
-                m.get("
+                m.get("base_url", "")
             )
         console.print(table)
     else:
@@ -2366,10 +2416,7 @@ def execute_shell_command(command: str):
         if process.returncode != 0:
             console.print(
                 f"[bold red]Command failed with return code {process.returncode}[/bold red]"
-            )
-        else:
-            console.print(
-                "[bold green]Command completed successfully[/bold green]")
+            )
 
     except FileNotFoundError:
         console.print(
@@ -2705,7 +2752,7 @@ def main():
         elif user_input.startswith("/conf"):
             conf = user_input[len("/conf"):].strip()
             if not conf:
-
+                print_conf(memory["conf"])
             else:
                 configure(conf)
         elif user_input.startswith("/revert"):
```
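The largest addition here is print_conf, which /conf (with no arguments) now calls to render the in-memory configuration as a Rich table inside a panel. A trimmed, runnable approximation using the same Rich primitives (the sample configuration values are invented):

```python
import json
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from rich.text import Text

def show_conf(content: dict) -> None:
    """Condensed version of the print_conf added in this release."""
    table = Table(title="Configuration Settings", show_header=True, show_lines=True)
    table.add_column("Key", style="cyan", justify="right")
    table.add_column("Value", style="green")
    for key in sorted(content):  # sorted for a stable display
        value = content[key]
        if isinstance(value, (dict, list)):
            rendered = Text(json.dumps(value, indent=2), style="yellow")
        else:
            rendered = Text(str(value))
        table.add_row(str(key), rendered)
    Console().print(Panel(table, padding=(1, 2), border_style="blue"))

show_conf({"auto_merge": "editblock", "generate_times_same_model": 2, "skip_build_index": False})
```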
autocoder/chat_auto_coder_lang.py CHANGED

```diff
@@ -75,6 +75,10 @@ MESSAGES = {
     "shell_desc": "Execute a shell command",
     "voice_input_desc": "Convert voice input to text",
     "mode_desc": "Switch input mode",
+    "conf_key": "Key",
+    "conf_value": "Value",
+    "conf_title": "Configuration Settings",
+    "conf_subtitle": "Use /conf <key>:<value> to modify these settings",
     "lib_desc": "Manage libraries",
     "exit_desc": "Exit the program",
     "design_desc": "Generate SVG image based on the provided description",
@@ -187,6 +191,10 @@ MESSAGES = {
     "design_desc": "根据需求设计SVG图片",
     "commit_desc": "根据用户人工修改的代码自动生成yaml文件并提交更改",
     "models_desc": "管理模型配置,仅在lite模式下可用",
+    "conf_key": "键",
+    "conf_value": "值",
+    "conf_title": "配置设置",
+    "conf_subtitle": "使用 /conf <key>:<value> 修改这些设置",
     "models_usage": "用法: /models /list|/add|/add_model|/remove ...",
     "models_added": "成功添加/更新模型 '{{name}}'。",
     "models_add_failed": "添加模型 '{{name}}' 失败。在默认模型中未找到该模型。",
```
autocoder/common/__init__.py CHANGED

```diff
@@ -350,6 +350,7 @@ class AutoCoderArgs(pydantic.BaseModel):
     skip_events: Optional[bool] = False
     data_cells_max_num: Optional[int] = 2000
     generate_times_same_model: Optional[int] = 1
+    rank_times_same_model: Optional[int] = 1
 
     action: List[str] = []
     enable_global_memory: Optional[bool] = True
```
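rank_times_same_model mirrors the existing generate_times_same_model field and defaults to 1, so ranking behavior is unchanged unless the user raises it. Given the /conf <key>:<value> convention documented by the new conf_subtitle message, enabling multiple votes per model would presumably look like this (hypothetical session input):

```
/conf rank_times_same_model:2
```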
autocoder/common/auto_coder_lang.py CHANGED

```diff
@@ -1,7 +1,8 @@
 import locale
 
 MESSAGES = {
-
+    "en": {
+    "new_session_started": "New session started. Previous chat history has been archived.",
     "memory_save_success": "✅ Saved to your memory",
     "index_file_too_large": "⚠️ File {{ file_path }} is too large ({{ file_size }} > {{ max_length }}), splitting into chunks...",
     "index_update_success": "✅ Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s",
@@ -60,9 +61,19 @@ MESSAGES = {
     "ranking_all_failed": "All ranking requests failed",
     "ranking_complete": "Ranking completed in {{ elapsed }}s, total voters: {{ total_tasks }}, best candidate index: {{ best_candidate }}, scores: {{ scores }}, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}",
     "ranking_process_failed": "Ranking process failed: {{ error }}",
-    "ranking_failed": "Ranking failed in {{ elapsed }}s, using original order"
+    "ranking_failed": "Ranking failed in {{ elapsed }}s, using original order",
+    "begin_index_source_code": "🚀 Begin to index source code in {{ source_dir }}",
+    "stream_out_stats": "Elapsed time {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
+    "upsert_file": "✅ Updated file: {{ file_path }}",
+    "unmerged_blocks_title": "Unmerged Blocks",
+    "unmerged_file_path": "File: {{file_path}}",
+    "unmerged_search_block": "Search Block({{similarity}}):",
+    "unmerged_replace_block": "Replace Block:",
+    "unmerged_blocks_total": "Total unmerged blocks: {{num_blocks}}",
+    "git_init_required": "⚠️ auto_merge only applies to git repositories.\n\nPlease try using git init in the source directory:\n\n```shell\ncd {{ source_dir }}\ngit init.\n```\n\nThen run auto - coder again.\nError: {{ error }}"
     },
     "zh": {
+    "new_session_started": "新会话已开始。之前的聊天历史已存档。",
     "memory_save_success": "✅ 已保存到您的记忆中",
     "index_file_too_large": "⚠️ 文件 {{ file_path }} 过大 ({{ file_size }} > {{ max_length }}), 正在分块处理...",
     "index_update_success": "✅ 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} 秒",
@@ -108,16 +119,17 @@ MESSAGES = {
     "normal_filter_start": "开始查找上下文(normal_filter)...",
     "pylint_check_failed": "⚠️ Pylint 检查失败: {{ error_message }}",
     "pylint_error": "❌ 运行 Pylint 时出错: {{ error_message }}",
+    "begin_index_source_code": "🚀 开始为 {{ source_dir }} 中的源代码建立索引",
     "unmerged_blocks_warning": "⚠️ 发现 {{ num_blocks }} 个未合并的代码块,更改将不会被应用。请手动检查后重试。",
     "pylint_file_check_failed": "⚠️ {{ file_path }} 的 Pylint 检查失败。更改未应用。错误: {{ error_message }}",
     "merge_success": "✅ 成功合并了 {{ num_files }} 个文件中的更改 {{ num_changes }}/{{ total_blocks }} 个代码块。",
     "no_changes_made": "⚠️ 未对任何文件进行更改。",
-    "unmerged_blocks_title": "
-    "unmerged_file_path": "
-    "unmerged_search_block": "Search Block({similarity}):",
+    "unmerged_blocks_title": "未合并代码块",
+    "unmerged_file_path": "文件: {file_path}",
+    "unmerged_search_block": "Search Block({{similarity}}):",
     "unmerged_replace_block": "Replace Block:",
-    "unmerged_blocks_total": "
-    "git_init_required": "⚠️ auto_merge 仅适用于 git 仓库。\n\n请尝试在源目录中使用 git init:\n\n```shell\ncd {{ source_dir }}\ngit init
+    "unmerged_blocks_total": "未合并代码块数量: {{num_blocks}}",
+    "git_init_required": "⚠️ auto_merge 仅适用于 git 仓库。\n\n请尝试在源目录中使用 git init:\n\n```shell\ncd {{ source_dir }}\ngit init.\n```\n\n然后再次运行 auto-coder。\n错误: {{ error }}",
     "upsert_file": "✅ 更新文件: {{ file_path }}",
     "files_merged": "✅ 成功合并了 {{ total }} 个文件到项目中。",
     "merge_failed": "❌ 合并文件 {{ path }} 失败: {{ error }}",
@@ -128,8 +140,9 @@ MESSAGES = {
     "ranking_all_failed": "所有排序请求都失败",
     "ranking_complete": "排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }}",
     "ranking_process_failed": "排序过程失败: {{ error }}",
-    "ranking_failed": "排序失败,耗时 {{ elapsed }} 秒,使用原始顺序"
-
+    "ranking_failed": "排序失败,耗时 {{ elapsed }} 秒,使用原始顺序",
+    "stream_out_stats": "耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}"
+    },
 }
 
 
```
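The message values in both language tables are Jinja2 templates; printer.py renders them with format_str_jinja2 from byzerllm, passing keyword arguments for the {{ ... }} placeholders. A minimal sketch of that lookup-and-render flow, assuming format_str_jinja2 behaves like a plain jinja2 Template(...).render(...); the trimmed MESSAGES table and hardcoded language choice are for illustration only:

```python
from jinja2 import Template

MESSAGES = {
    "en": {"stream_out_stats": ("Elapsed time {{ elapsed_time }} seconds, "
                                "input tokens: {{ input_tokens }}, "
                                "output tokens: {{ output_tokens }}")},
}

def get_message(key: str, lang: str = "en") -> str:
    return MESSAGES[lang][key]

# Mirrors what printer.print_in_terminal does with its keyword arguments.
rendered = Template(get_message("stream_out_stats")).render(
    elapsed_time=1.82, input_tokens=1200, output_tokens=345)
print(rendered)
```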
|
|
@@ -422,11 +422,11 @@ class CodeAutoMergeEditBlock:
|
|
|
422
422
|
self.printer.print_in_terminal("unmerged_blocks_title", style="bold red")
|
|
423
423
|
for file_path, head, update, similarity in unmerged_blocks:
|
|
424
424
|
self.printer.print_str_in_terminal(
|
|
425
|
-
f"\n{self.printer.
|
|
425
|
+
f"\n{self.printer.get_message_from_key_with_format('unmerged_file_path',file_path=file_path)}",
|
|
426
426
|
style="bold blue"
|
|
427
427
|
)
|
|
428
428
|
self.printer.print_str_in_terminal(
|
|
429
|
-
f"\n{self.printer.
|
|
429
|
+
f"\n{self.printer.get_message_from_key_with_format('unmerged_search_block',similarity=similarity)}",
|
|
430
430
|
style="bold green"
|
|
431
431
|
)
|
|
432
432
|
syntax = Syntax(head, "python", theme="monokai", line_numbers=True)
|
|
autocoder/common/code_modification_ranker.py CHANGED

```diff
@@ -66,8 +66,8 @@ class CodeModificationRanker:
 
         self.printer.print_in_terminal(
             "ranking_start", style="blue", count=len(generate_result.contents))
-
-        total_tasks = len(self.llms) *
+        rank_times = self.args.rank_times_same_model
+        total_tasks = len(self.llms) * rank_times
 
         query = self._rank_modifications.prompt(generate_result)
         input_tokens_count = 0
@@ -78,7 +78,7 @@ class CodeModificationRanker:
         # Submit tasks for each model and generate_times
         futures = []
         for llm in self.llms:
-            for _ in range(
+            for _ in range(rank_times):
                 futures.append(
                     executor.submit(
                         chat_with_continue,
@@ -99,9 +99,7 @@ class CodeModificationRanker:
                     results.append(v.rank_result)
                 except Exception as e:
                     self.printer.print_in_terminal(
-                        "ranking_failed_request", style="yellow", error=str(e))
-                    if self.args.debug:
-                        print(traceback.format_exc())
+                        "ranking_failed_request", style="yellow", error=str(e))
                     continue
 
             if not results:
```
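With the new field wired in, the ranker fans out len(self.llms) * rank_times_same_model identical ranking requests and keeps whichever succeed, so a single model can cast several votes. A self-contained sketch of that fan-out-and-vote pattern; the majority-count scoring here is an assumption, since _rank_modifications itself is not shown in this diff:

```python
import random
from collections import Counter
from concurrent.futures import ThreadPoolExecutor, as_completed

def rank_once(model: str, num_candidates: int) -> int:
    # Stand-in for one LLM ranking request; returns the preferred candidate index.
    return random.randrange(num_candidates)

def vote_best(models: list, rank_times: int, num_candidates: int) -> int:
    total_tasks = len(models) * rank_times  # same arithmetic as the diff
    votes = Counter()
    with ThreadPoolExecutor(max_workers=total_tasks) as executor:
        futures = [executor.submit(rank_once, m, num_candidates)
                   for m in models for _ in range(rank_times)]
        for fut in as_completed(futures):
            try:
                votes[fut.result()] += 1
            except Exception:
                continue  # failed voters are skipped, as in the diff
    if not votes:
        raise RuntimeError("all ranking requests failed")
    return votes.most_common(1)[0][0]

print(vote_best(["model-a", "model-b"], rank_times=2, num_candidates=3))
```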
autocoder/common/printer.py CHANGED

```diff
@@ -5,6 +5,7 @@ from typing import Optional,Dict,Any
 from byzerllm.utils import format_str_jinja2
 from autocoder.common.auto_coder_lang import get_message
 from autocoder.chat_auto_coder_lang import get_message as get_chat_message
+
 class Printer:
     def __init__(self,console:Optional[Console]=None):
         if console is None:
@@ -45,5 +46,5 @@ class Printer:
 
     def print_panel(self, content: str, text_options:Dict[str,Any], panel_options:Dict[str,Any]):
         panel = Panel(Text(content, **text_options), **panel_options)
-        self.console.print(panel)
-
+        self.console.print(panel)
+
```
autocoder/common/utils_code_auto_generate.py CHANGED

```diff
@@ -57,7 +57,7 @@ def stream_chat_with_continue(
     count = 0
     temp_conversations = conversations
     current_metadata = None
-
+    metadatas = {}
     while True:
         # Fetch generated content through the streaming interface
         stream_generator = llm.stream_chat_oai(
@@ -67,17 +67,20 @@ def stream_chat_with_continue(
         )
 
         current_content = ""
+
         for res in stream_generator:
             content = res[0]
             current_content += content
             if current_metadata is None:
                 current_metadata = res[1]
             else:
-
-                current_metadata.
-                current_metadata.
+                metadatas[count] = res[1]
+                current_metadata.finish_reason = res[1].finish_reason
+                current_metadata.reasoning_content = res[1].reasoning_content
 
             # Yield the current StreamChatWithContinueResult
+            current_metadata.generated_tokens_count = sum([v.generated_tokens_count for _, v in metadatas.items()])
+            current_metadata.input_tokens_count = sum([v.input_tokens_count for _, v in metadatas.items()])
             yield (content, current_metadata)
 
         # Update conversation history
```
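The fix in stream_chat_with_continue: rather than mutating one metadata object in place, each continuation round's latest metadata is kept in the metadatas dict, and the cumulative input/output token totals are recomputed on every yield, so counts survive auto-continue rounds. A distilled sketch of just that bookkeeping (SimpleNamespace stands in for the SDK metadata type):

```python
from types import SimpleNamespace

def rounds():
    # Stand-in for successive stream_chat_oai calls; counts are per-round running totals.
    yield [("Hello ", SimpleNamespace(input_tokens_count=100, generated_tokens_count=1)),
           ("wor",    SimpleNamespace(input_tokens_count=100, generated_tokens_count=2))]
    yield [("ld",     SimpleNamespace(input_tokens_count=140, generated_tokens_count=1))]

metadatas = {}  # latest metadata per continuation round, keyed by round counter
for count, round_chunks in enumerate(rounds()):
    for content, meta in round_chunks:
        metadatas[count] = meta  # overwrite within a round: keep its most recent counts
        # Cumulative totals across all rounds seen so far, recomputed per chunk
        total_in = sum(m.input_tokens_count for m in metadatas.values())
        total_out = sum(m.generated_tokens_count for m in metadatas.values())
        print(f"{content!r}: input={total_in}, output={total_out}")
```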
autocoder/index/for_command.py CHANGED

```diff
@@ -5,7 +5,7 @@ from autocoder.tsproject import TSProject
 from autocoder.pyproject import PyProject
 import tabulate
 import textwrap
-from
+from autocoder.common.printer import Printer
 import os
 from autocoder.utils.request_queue import (
     request_queue,
@@ -35,7 +35,8 @@ def wrap_text_in_table(data, max_width=60):
 def index_command(args, llm):
     source_dir = os.path.abspath(args.source_dir)
     args.source_dir = source_dir
-
+    printer = Printer()
+    printer.print_in_terminal("begin_index_source_code", style="bold green", source_dir=source_dir)
     if args.project_type == "ts":
         pp = TSProject(args=args, llm=llm)
     elif args.project_type == "py":
```
autocoder/utils/auto_coder_utils/chat_stream_out.py CHANGED

```diff
@@ -2,11 +2,265 @@ from rich.console import Console
 from rich.live import Live
 from rich.panel import Panel
 from rich.markdown import Markdown
-from
+from rich.layout import Layout
+from threading import Thread, Lock
+from queue import Queue, Empty
+from typing import Generator, List, Dict, Any, Optional, Tuple, Literal
 from autocoder.utils.request_queue import RequestValue, RequestOption, StreamValue
 from autocoder.utils.request_queue import request_queue
+import time
 
 MAX_HISTORY_LINES = 40  # Maximum number of history lines to keep
+LAYOUT_TYPES = Literal["vertical", "horizontal"]
+
+class StreamController:
+    def __init__(self, layout_type: LAYOUT_TYPES = "vertical", console: Optional[Console] = None):
+        self.console = console or Console(force_terminal=True, color_system="auto", height=24)  # default height
+        self.layout = Layout()
+        self.queue = Queue()
+        self.lock = Lock()
+        self.running = True
+        self.workers = []
+        self.layout_type = layout_type
+        self.stream_count = 0
+
+    def _create_stream_panel(self, idx: int) -> Layout:
+        """Create the layout for one stream panel."""
+        # Compute a safe height
+        current_height = self.console.height or 24  # fall back to 24 rows if unavailable
+        safe_height = max(min(50, current_height // 2 - 4), 5)  # minimum of 5 rows
+
+        # Layout size must be an integer
+        panel = Layout(name=f"stream-{idx}", size=safe_height)
+
+        panel.update(
+            Panel(
+                Markdown(""),
+                title=f"Stream {idx + 1}",
+                border_style="green",
+                height=safe_height  # keep the value valid
+            )
+        )
+        return panel
+
+    def prepare_layout(self, count: int):
+        """Build the dynamic layout structure."""
+        self.stream_count = count
+
+        # Top-level container for all streams
+        streams_layout = Layout(name="streams")
+
+        # Create a layout per stream
+        stream_layouts = []
+        for i in range(count):
+            stream_layout = Layout(name=f"stream-{i}")
+            panel = self._create_stream_panel(i)
+            stream_layout.update(panel)
+            stream_layouts.append(stream_layout)
+
+        # Attach every stream to the container
+        if stream_layouts:
+            streams_layout.update(stream_layouts[0])
+            for i in range(1, len(stream_layouts)):
+                if self.layout_type == "vertical":
+                    streams_layout.split_column(stream_layouts[i])
+                elif self.layout_type == "horizontal":
+                    streams_layout.split_row(stream_layouts[i])
+                else:
+                    streams_layout.split_column(stream_layouts[i])
+
+        # Keep the header and the streams layout separate
+        self.layout.split(
+            Layout(name="header", size=1),
+            streams_layout
+        )
+
+    def update_panel(self, idx: int, content: str, final: bool = False):
+        """Thread-safe panel update."""
+        with self.lock:
+            # Compute a safe height
+            safe_height = min(50, self.console.height // 2 - 4)
+
+            if final:
+                new_panel = Panel(
+                    Markdown(content),
+                    title=f"Final Stream {idx+1}",
+                    border_style="blue",
+                    height=safe_height
+                )
+            else:
+                new_panel = Panel(
+                    Markdown(content),
+                    title=f"Stream {idx+1}",
+                    border_style="green",
+                    height=safe_height
+                )
+
+            panel_name = f"stream-{idx}"
+            streams_layout = self.layout["streams"]
+
+            # Recursively search for the target layout
+            def find_layout(layout, name):
+                if layout.name == name:
+                    return layout
+                for child in layout.children:
+                    result = find_layout(child, name)
+                    if result:
+                        return result
+                return None
+
+            # Find and update the target layout
+            target_layout = find_layout(streams_layout, panel_name)
+            if target_layout:
+                target_layout.update(new_panel)
+            else:
+                import logging
+                logging.warning(f"Layout {panel_name} not found; panel not updated.")
+
+def stream_worker(
+    idx: int,
+    generator: Generator[Tuple[str, Dict[str, Any]], None, None],
+    controller: StreamController,
+    request_id: Optional[str] = None
+) -> Tuple[str, Optional[Dict[str, Any]]]:
+    """Worker thread handling a single stream."""
+    lines_buffer = []
+    current_line = ""
+    assistant_response = ""
+    last_meta = None
+
+    try:
+        for res in generator:
+            content, meta = res
+            last_meta = meta
+
+            assistant_response += content
+            display_delta = meta.reasoning_content or content
+
+            parts = (current_line + display_delta).split("\n")
+            if len(parts) > 1:
+                lines_buffer.extend(parts[:-1])
+                if len(lines_buffer) > MAX_HISTORY_LINES:
+                    del lines_buffer[0:len(lines_buffer) - MAX_HISTORY_LINES]
+
+            current_line = parts[-1]
+            display_content = "\n".join(lines_buffer[-MAX_HISTORY_LINES:] + [current_line])
+
+            controller.queue.put((idx, display_content, False))
+
+            if request_id and request_queue:
+                request_queue.add_request(
+                    request_id,
+                    RequestValue(
+                        value=StreamValue(value=[content]),
+                        status=RequestOption.RUNNING,
+                    ),
+                )
+
+        if current_line:
+            lines_buffer.append(current_line)
+        controller.queue.put((idx, assistant_response, True))
+        return assistant_response, last_meta
+
+    except Exception as e:
+        error_content = f"Error: {str(e)}"
+        controller.queue.put((idx, error_content, True))
+        if request_id and request_queue:
+            request_queue.add_request(
+                request_id,
+                RequestValue(
+                    value=StreamValue(value=[str(e)]),
+                    status=RequestOption.FAILED
+                ),
+            )
+        return assistant_response, last_meta
+    finally:
+        if request_id and request_queue:
+            request_queue.add_request(
+                request_id,
+                RequestValue(
+                    value=StreamValue(value=[""]),
+                    status=RequestOption.COMPLETED
+                ),
+            )
+
+def multi_stream_out(
+    stream_generators: List[Generator[Tuple[str, Dict[str, Any]], None, None]],
+    request_ids: Optional[List[str]] = None,
+    console: Optional[Console] = None,
+    layout_type: LAYOUT_TYPES = "vertical"
+) -> List[Tuple[str, Optional[Dict[str, Any]]]]:
+    """
+    Parallel renderer for multiple output streams.
+
+    Args:
+        stream_generators: list of stream generators
+        request_ids: matching list of request IDs
+        console: Rich Console instance
+        layout_type: layout direction, vertical or horizontal
+
+    Returns:
+        List[Tuple[str, Dict]]: result of each stream
+    """
+    # Make sure a single console instance is shared
+    if console is None:
+        console = Console(force_terminal=True, color_system="auto", height=24)
+
+    # Initialize the controller
+    controller = StreamController(layout_type, console=console)
+    stream_count = len(stream_generators)
+    controller.prepare_layout(stream_count)
+
+    # Start the worker threads
+    results = [None] * stream_count
+    threads = []
+
+    # Worker thread target
+    def worker_target(idx: int, gen: Generator[Tuple[str, Dict[str, Any]], None, None]):
+        req_id = request_ids[idx] if request_ids and idx < len(request_ids) else None
+        results[idx] = stream_worker(idx, gen, controller, req_id)
+
+    # Launch all workers
+    for idx, gen in enumerate(stream_generators):
+        t = Thread(target=worker_target, args=(idx, gen))
+        t.start()
+        threads.append(t)
+
+    # Main render loop
+    try:
+        with Live(
+            controller.layout,
+            console=console or controller.console,
+            refresh_per_second=10,
+            screen=True
+        ) as live:
+            while controller.running:
+                updated = False
+                try:
+                    while True:  # drain every pending update in the queue
+                        idx, content, final = controller.queue.get_nowait()
+                        controller.update_panel(idx, content, final)
+                        updated = True
+                except Empty:
+                    pass
+
+                if updated:
+                    live.refresh()
+
+                # Stop once every worker thread has finished
+                if all(not t.is_alive() for t in threads):
+                    break
+
+                time.sleep(0.1)
+
+    finally:
+        controller.running = False
+        for t in threads:
+            t.join()
+
+        # One final refresh
+        (console or controller.console).print(controller.layout)
+    return results
 
 def stream_out(
     stream_generator: Generator[Tuple[str, Dict[str, Any]], None, None],
@@ -41,10 +295,17 @@ def stream_out(
         for res in stream_generator:
             last_meta = res[1]
             content = res[0]
+            reasoning_content = last_meta.reasoning_content
+
+            if reasoning_content == "" and content == "":
+                continue
+
             assistant_response += content
+
+            display_delta = reasoning_content if reasoning_content else content
 
             # Process all lines
-            parts = (current_line +
+            parts = (current_line + display_delta).split("\n")
 
             # The last part is an unfinished line
             if len(parts) > 1:
```
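multi_stream_out drives one worker thread per generator and renders each stream in its own live panel, stacked vertically or side by side. A small usage sketch with synthetic streams; the SimpleNamespace metadata carries only the reasoning_content attribute the worker reads:

```python
import time
from types import SimpleNamespace

from autocoder.utils.auto_coder_utils.chat_stream_out import multi_stream_out

def make_stream(text: str, delay: float):
    def gen():
        meta = SimpleNamespace(reasoning_content="")
        for word in text.split():
            time.sleep(delay)
            yield word + " ", meta
    return gen()

results = multi_stream_out(
    [make_stream("streaming output from candidate one", 0.05),
     make_stream("streaming output from candidate two", 0.08)],
    layout_type="horizontal",  # "vertical" is the default
)
for assistant_response, last_meta in results:
    print(assistant_response)
```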
autocoder/version.py CHANGED

```diff
@@ -1 +1 @@
-__version__ = "0.1.240"
+__version__ = "0.1.243"
```
{auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/LICENSE: File without changes
{auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/WHEEL: File without changes
{auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/entry_points.txt: File without changes
{auto_coder-0.1.240.dist-info → auto_coder-0.1.243.dist-info}/top_level.txt: File without changes