auto-coder 0.1.263__py3-none-any.whl → 0.1.264__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of auto-coder might be problematic.
- {auto_coder-0.1.263.dist-info → auto_coder-0.1.264.dist-info}/METADATA +1 -1
- {auto_coder-0.1.263.dist-info → auto_coder-0.1.264.dist-info}/RECORD +24 -23
- autocoder/chat_auto_coder.py +53 -49
- autocoder/common/__init__.py +6 -0
- autocoder/common/auto_coder_lang.py +6 -2
- autocoder/common/code_auto_generate_diff.py +9 -9
- autocoder/common/code_auto_merge.py +23 -3
- autocoder/common/code_auto_merge_diff.py +28 -3
- autocoder/common/code_auto_merge_editblock.py +24 -4
- autocoder/common/code_auto_merge_strict_diff.py +23 -3
- autocoder/common/code_modification_ranker.py +65 -3
- autocoder/common/conf_validator.py +6 -0
- autocoder/common/context_pruner.py +305 -0
- autocoder/index/entry.py +8 -2
- autocoder/index/filter/normal_filter.py +13 -2
- autocoder/index/filter/quick_filter.py +127 -13
- autocoder/index/index.py +3 -2
- autocoder/utils/project_structure.py +258 -3
- autocoder/utils/thread_utils.py +6 -1
- autocoder/version.py +1 -1
- {auto_coder-0.1.263.dist-info → auto_coder-0.1.264.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.263.dist-info → auto_coder-0.1.264.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.263.dist-info → auto_coder-0.1.264.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.263.dist-info → auto_coder-0.1.264.dist-info}/top_level.txt +0 -0
{auto_coder-0.1.263.dist-info → auto_coder-0.1.264.dist-info}/RECORD
CHANGED

@@ -6,12 +6,12 @@ autocoder/auto_coder_rag_client_mcp.py,sha256=QRxUbjc6A8UmDMQ8lXgZkjgqtq3lgKYeat
 autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
 autocoder/auto_coder_server.py,sha256=XU9b4SBH7zjPPXaTWWHV4_zJm-XYa6njuLQaplYJH_c,20290
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
-autocoder/chat_auto_coder.py,sha256=
+autocoder/chat_auto_coder.py,sha256=LektrcL3jr9iTgmDK4B5YjkFskIkdJB7y5JTVJODuWw,110454
 autocoder/chat_auto_coder_lang.py,sha256=ShOQVOnMA-WlT-fB9OrOer-xQkbcWxJGl-WMPuZcUkM,19572
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
 autocoder/models.py,sha256=rG7ckiKlers-XoO1gWxNK-Y-IbqD82WS3qFMPHqvFsc,9072
-autocoder/version.py,sha256=
+autocoder/version.py,sha256=grAb0FoqsepZ1Bs3Y464jN2s0QwFZDiqKUTBSvFupwk,23
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -28,30 +28,31 @@ autocoder/commands/auto_command.py,sha256=nPzTdVJES1MDfBfRuKl7SxKh_8CZGCz-pPAtEi
 autocoder/commands/tools.py,sha256=rgZWuTtmn-Ck7G2EkeBRMFh6_TjLzssW1mabQoCrzR0,20327
 autocoder/common/JupyterClient.py,sha256=O-wi6pXeAEYhAY24kDa0BINrLYvKS6rKyWe98pDClS0,2816
 autocoder/common/ShellClient.py,sha256=fM1q8t_XMSbLBl2zkCNC2J9xuyKN3eXzGm6hHhqL2WY,2286
-autocoder/common/__init__.py,sha256=
+autocoder/common/__init__.py,sha256=Y9HeCK6yNRsfNFvrfVHZSb_QUZcoGr1pcCO3JEkHBco,12787
 autocoder/common/anything2images.py,sha256=0ILBbWzY02M-CiWB-vzuomb_J1hVdxRcenAfIrAXq9M,25283
 autocoder/common/anything2img.py,sha256=4TREa-sOA-iargieUy7MpyCYVUE-9Mmq0wJtwomPqnE,7662
 autocoder/common/audio.py,sha256=Kn9nWKQddWnUrAz0a_ZUgjcu4VUU_IcZBigT7n3N3qc,7439
-autocoder/common/auto_coder_lang.py,sha256=
+autocoder/common/auto_coder_lang.py,sha256=nEVmME_daf4ldRjRwAWt3zFhTTH1Ss3Do6---EX9eYA,26212
 autocoder/common/auto_configure.py,sha256=tdEwfycZUjomZAgps1GOCtocYEtfuUgRksYPFHBP_bs,12211
 autocoder/common/buildin_tokenizer.py,sha256=L7d5t39ZFvUd6EoMPXUhYK1toD0FHlRH1jtjKRGokWU,1236
 autocoder/common/chunk_validation.py,sha256=BrR_ZWavW8IANuueEE7hS8NFAwEvm8TX34WnPx_1hs8,3030
 autocoder/common/cleaner.py,sha256=NU72i8C6o9m0vXExab7nao5bstBUsfJFcj11cXa9l4U,1089
 autocoder/common/code_auto_execute.py,sha256=4KXGmiGObr_B1d6tzV9dwS6MifCSc3Gm4j2d6ildBXQ,6867
 autocoder/common/code_auto_generate.py,sha256=74wCscxVEnY_VDkHcr-QA3b79RhDR_OeVPOI7UKRJwA,13040
-autocoder/common/code_auto_generate_diff.py,sha256=
+autocoder/common/code_auto_generate_diff.py,sha256=d6ALHEeCTFfoJVwkBhusUUib665UJDoMn1dV7Uze4sE,18809
 autocoder/common/code_auto_generate_editblock.py,sha256=LcGfG4bJVCVsWehex7MYWDF4NX0B2Rp2ALSh-27MclA,20472
 autocoder/common/code_auto_generate_strict_diff.py,sha256=JvKnD5Ph3JtAiVIO_k_XKUnVBeUxwLw_AHF_xWWtX7c,17488
-autocoder/common/code_auto_merge.py,sha256=
-autocoder/common/code_auto_merge_diff.py,sha256=
-autocoder/common/code_auto_merge_editblock.py,sha256=
-autocoder/common/code_auto_merge_strict_diff.py,sha256=
-autocoder/common/code_modification_ranker.py,sha256=
+autocoder/common/code_auto_merge.py,sha256=cMEX44QT59iMPEo5B8OQKU9fAzn4iIahEPk2n3rNFvg,8643
+autocoder/common/code_auto_merge_diff.py,sha256=r_-5He34g8BJvyrk6ntX6da9qICRAPR-35m8v_xivLw,16808
+autocoder/common/code_auto_merge_editblock.py,sha256=5D8uJPz9O3hzqbDQshHGy18kJ7c33rBb0Y_O5raOiCQ,18793
+autocoder/common/code_auto_merge_strict_diff.py,sha256=d2DWtgjLh3-BpQR4F3x1Piz8zXw-VUjaUxGNRbB91sw,10827
+autocoder/common/code_modification_ranker.py,sha256=sRNIPIsQP8FucxSuBeAHKJ7RWB_iB5Pb_cd0BAhvgGc,10926
 autocoder/common/command_completer.py,sha256=Nd-DdlFd2MMy9jpSI5N5rGKek-zEzI4Wa2g9w7mjQYk,35122
 autocoder/common/command_generator.py,sha256=-hmbD_AnCa5HxL4BznuEfYAf_l8AxU5fAG5F0sM_fuE,2116
 autocoder/common/command_templates.py,sha256=lAdr0-iyJKY2dOH2mZ0Tm3GlT_a1Oj8mgdKXmDiQN3A,8654
-autocoder/common/conf_validator.py,sha256=
+autocoder/common/conf_validator.py,sha256=Pqf8m12YqZtxEEjvSD4mqj6jeZiwfV9ggEMd2PKq1kw,8787
 autocoder/common/const.py,sha256=eTjhjh4Aj4CUzviJ81jaf3Y5cwqsLATySn2wJxaS6RQ,2911
+autocoder/common/context_pruner.py,sha256=yaDAMX8Zgfgl666gp1Rwxd7I7jmZXtEKaf3_TebJ0I4,12021
 autocoder/common/conversation_pruner.py,sha256=mdMpTpTdPJl8f0UjC1TGKRiYtDc1o6QQD0nYPR9yp1c,5628
 autocoder/common/files.py,sha256=CguxG9digkWBJpRaILErZmL_G5ryPRahPmPFWGB7X18,1973
 autocoder/common/git_utils.py,sha256=qeuF_IB3G3M72asHxWokROU3hINCuFA1nar-UtF9wIU,26022
@@ -90,14 +91,14 @@ autocoder/dispacher/actions/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
 autocoder/dispacher/actions/plugins/action_regex_project.py,sha256=2Ikj6dlgezXJQC3hnbC5mrDuSGtF20bVbpOrEDOq25s,6984
 autocoder/dispacher/actions/plugins/action_translate.py,sha256=nVAtRSQpdGNmZxg1R_9zXG3AuTv3CHf2v7ODgj8u65c,7727
 autocoder/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/index/entry.py,sha256=
+autocoder/index/entry.py,sha256=XCq1GCBq1mDK8xo35VzixTwodx4mPytY0gqPoz6EvzU,13401
 autocoder/index/for_command.py,sha256=BFvljE4t6VaMBGboZAuhUCzVK0EitCy_n5D_7FEnihw,3204
-autocoder/index/index.py,sha256=
+autocoder/index/index.py,sha256=SGRp77_Dkfs0qWVCrB0lyO8aLPLtj-vc3tZOaODezKI,25422
 autocoder/index/symbols_utils.py,sha256=_EP7E_qWXxluAxq3FGZLlLfdrfwx3FmxCdulI8VGuac,2244
 autocoder/index/types.py,sha256=a2s_KV5FJlq7jqA2ELSo9E1sjuLwDB-JJYMhSpzBAhU,596
 autocoder/index/filter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/index/filter/normal_filter.py,sha256=
-autocoder/index/filter/quick_filter.py,sha256=
+autocoder/index/filter/normal_filter.py,sha256=QrG3YJU5F9CjmVm6kjfce_3_pYhvw_Q65betD0Vhryg,8356
+autocoder/index/filter/quick_filter.py,sha256=FL-Hn8Cz0IC4e752dKpOKLXO92V7yOGl_crFmRGqwhE,22552
 autocoder/privacy/__init__.py,sha256=LnIVvGu_K66zCE-yhN_-dPO8R80pQyedCsXJ7wRqQaI,72
 autocoder/privacy/model_filter.py,sha256=-N9ZvxxDKpxU7hkn-tKv-QHyXjvkCopUaKgvJwTOGQs,3369
 autocoder/pyproject/__init__.py,sha256=bRuGxFV4QyE85xVjDzeMFmlLVqGbbcFs09FI15Uss4Q,14423
@@ -147,20 +148,20 @@ autocoder/utils/model_provider_selector.py,sha256=g5O9frBWkXR7iqjYDdTvhoxzTQx0Na
 autocoder/utils/multi_turn.py,sha256=unK9OpqVRbK6uIcTKXgggX2wNmyj7s5eyEAQ2xUwHoM,88
 autocoder/utils/operate_config_api.py,sha256=99YAKsuUFLPwrRvj0CJal_bAPgyiXWMma6ZKMU56thw,5790
 autocoder/utils/print_table.py,sha256=ZMRhCA9DD0FUfKyJBWd5bDdj1RrtPtgOMWSJwtvZcLs,403
-autocoder/utils/project_structure.py,sha256=
+autocoder/utils/project_structure.py,sha256=3X94fkXC1aMztHaLwKGxucE3s_1hD4gFarg4vvN-pfE,10313
 autocoder/utils/queue_communicate.py,sha256=buyEzdvab1QA4i2QKbq35rG5v_9x9PWVLWWMTznWcYM,6832
 autocoder/utils/request_event_queue.py,sha256=r3lo5qGsB1dIjzVQ05dnr0z_9Z3zOkBdP1vmRciKdi4,2095
 autocoder/utils/request_queue.py,sha256=nwp6PMtgTCiuwJI24p8OLNZjUiprC-TsefQrhMI-yPE,3889
 autocoder/utils/rest.py,sha256=hLBhr78y-WVnV0oQf9Rxc22EwqF78KINkScvYa1MuYA,6435
 autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
-autocoder/utils/thread_utils.py,sha256=
+autocoder/utils/thread_utils.py,sha256=tv9fhFZOjI18AxVUJbpe_xjBGMpkqgDcOlz9pnDtNik,8583
 autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=lkJ_A-sYU36JMzjFWkk3pR6uos8oZHYt9GPsPe_CPAo,11766
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
+auto_coder-0.1.264.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.264.dist-info/METADATA,sha256=QrJeenHIyx1s1s0gNNEKJ0VD_2vV4_pwpDQaIo7Lado,2616
+auto_coder-0.1.264.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.264.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.264.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.264.dist-info/RECORD,,
autocoder/chat_auto_coder.py
CHANGED
@@ -268,7 +268,7 @@ def initialize_system(args):
 from autocoder.utils.model_provider_selector import ModelProviderSelector
 from autocoder import models as models_module
 print(f"\n\033[1;34m{get_message('initializing')}\033[0m")
-
+
 first_time = [False]
 configure_success = [False]

@@ -391,7 +391,7 @@ def initialize_system(args):
 except subprocess.CalledProcessError:
 print_status(get_message("deploy_fail"), "error")
 return
-
+

 deploy_cmd = [
 "byzerllm",
@@ -672,12 +672,12 @@ completer = CommandCompleter(commands,

 def print_conf(content:Dict[str,Any]):
 """Display configuration dictionary in a Rich table format with enhanced visual styling.
-
+
 Args:
 conf (Dict[str, Any]): Configuration dictionary to display
 """
 console = Console()
-
+
 # Create a styled table with rounded borders
 table = Table(
 show_header=True,
@@ -687,11 +687,11 @@ def print_conf(content:Dict[str,Any]):
 border_style="blue",
 show_lines=True
 )
-
+
 # Add columns with explicit width and alignment
 table.add_column(get_message("conf_key"), style="cyan", justify="right", width=30, no_wrap=False)
 table.add_column(get_message("conf_value"), style="green", justify="left", width=50, no_wrap=False)
-
+
 # Sort keys for consistent display
 for key in sorted(content.keys()):
 value = content[key]
@@ -704,9 +704,9 @@ def print_conf(content:Dict[str,Any]):
 formatted_value = Text(str(value), style="bright_cyan")
 else:
 formatted_value = Text(str(value), style="green")
-
+
 table.add_row(str(key), formatted_value)
-
+
 # Add padding and print with a panel
 console.print(Panel(
 table,
@@ -742,7 +742,7 @@ def revert():


 def add_files(args: List[str]):
-
+
 result_manager = ResultManager()
 if "groups" not in memory["current_files"]:
 memory["current_files"]["groups"] = {}
@@ -837,7 +837,7 @@ def add_files(args: List[str]):
 )
 result_manager.append(content=f"Added group '{group_name}' with current files.",
 meta={"action": "add_files","success":True, "input":{ "args": args}})
-
+
 elif len(args) >= 3 and args[1] == "/drop":
 group_name = args[2]
 if group_name in groups:
@@ -1272,14 +1272,14 @@ def mcp(query: str):
 os.makedirs(mcp_dir, exist_ok=True)
 timestamp = str(int(time.time()))
 file_path = os.path.join(mcp_dir, f"{timestamp}.md")
-
+
 # Format response as markdown
 markdown_content = response.result
-
+
 # Save to file
 with open(file_path, "w", encoding="utf-8") as f:
 f.write(markdown_content)
-
+
 console = Console()
 console.print(
 Panel(
@@ -1424,13 +1424,13 @@ def commit(query: str):
 finally:
 if os.path.exists(temp_yaml):
 os.remove(temp_yaml)
-
+
 target_model = args.commit_model or args.model
 llm = get_single_llm(target_model, product_mode)
 printer = Printer()
 printer.print_in_terminal("commit_generating", style="yellow", model_name=target_model)
 commit_message = ""
-
+
 try:
 uncommitted_changes = git_utils.get_uncommitted_changes(".")
 commit_message = git_utils.generate_commit_message.with_llm(llm).run(
@@ -1441,7 +1441,7 @@ def commit(query: str):
 except Exception as e:
 printer.print_in_terminal("commit_failed", style="red", error=str(e), model_name=target_model)
 return
-
+
 yaml_config["query"] = commit_message
 yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)
 with open(os.path.join(execute_file), "w") as f:
@@ -1513,14 +1513,14 @@ def coding(query: str):
 converted_value = convert_config_value(key, value)
 if converted_value is not None:
 yaml_config[key] = converted_value
-
+
 yaml_config["urls"] = current_files + get_llm_friendly_package_docs(
 return_paths=True
 )

 if conf.get("enable_global_memory", "true") in ["true", "True",True]:
 yaml_config["urls"] += get_global_memory_file_paths()
-
+
 # handle image
 v = Image.convert_image_paths_from(query)
 yaml_config["query"] = v
@@ -1667,7 +1667,7 @@ def chat(query: str):
 if "/save" in query:
 yaml_config["action"].append("save")
 query = query.replace("/save", "", 1).strip()
-
+
 if "/review" in query and "/commit" in query:
 yaml_config["action"].append("review_commit")
 query = query.replace("/review", "", 1).replace("/commit", "", 1).strip()
@@ -1892,22 +1892,22 @@ def manage_models(query: str):
 """
 printer = Printer()
 console = Console()
-
+
 product_mode = memory.get("product_mode", "lite")
 if product_mode != "lite":
 printer.print_in_terminal("models_lite_only", style="red")
 return
-
+
 models_data = models_module.load_models()
 subcmd = ""
 if "/list" in query:
 subcmd = "/list"
 query = query.replace("/list", "", 1).strip()
-
+
 if "/add_model" in query:
 subcmd = "/add_model"
 query = query.replace("/add_model", "", 1).strip()
-
+
 if "/add" in query:
 subcmd = "/add"
 query = query.replace("/add", "", 1).strip()
@@ -1916,7 +1916,7 @@ def manage_models(query: str):
 if "/activate" in query:
 subcmd = "/add"
 query = query.replace("/activate", "", 1).strip()
-
+
 if "/remove" in query:
 subcmd = "/remove"
 query = query.replace("/remove", "", 1).strip()
@@ -1936,23 +1936,23 @@ def manage_models(query: str):
 if "output_price" in query:
 subcmd = "/output_price"
 query = query.replace("/output_price", "", 1).strip()
-
+
 if "/speed" in query:
 subcmd = "/speed"
 query = query.replace("/speed", "", 1).strip()
-
-
+
+

 if not subcmd:
 printer.print_in_terminal("models_usage")
-
+
 result_manager = ResultManager()
 if subcmd == "/list":
 if models_data:
 # Sort models by speed (average_speed)
 sorted_models = sorted(models_data, key=lambda x: float(x.get('average_speed', 0)))
 sorted_models.reverse()
-
+
 table = Table(
 title=printer.get_message_from_key("models_title"),
 expand=True,
@@ -1973,7 +1973,7 @@ def manage_models(query: str):
 if not api_key:
 printer.print_in_terminal("models_api_key_empty", style="yellow", name=name)
 name = f"{name} *"
-
+
 table.add_row(
 name,
 m.get("model_name", ""),
@@ -1989,7 +1989,7 @@ def manage_models(query: str):
 "query": query
 }
 })
-
+
 else:
 printer.print_in_terminal("models_no_models", style="yellow")
 result_manager.add_result(content="No models found",meta={
@@ -2037,7 +2037,7 @@ def manage_models(query: str):
 }
 })
 printer.print_in_terminal("models_input_price_usage", style="red")
-
+
 elif subcmd == "/output_price":
 args = query.strip().split()
 if len(args) >= 2:
@@ -2115,11 +2115,11 @@ def manage_models(query: str):
 }
 })
 printer.print_in_terminal("models_speed_usage", style="red")
-
+
 elif subcmd == "/speed-test":
 from autocoder.common.model_speed_test import render_speed_test_in_terminal
 test_rounds = 1  # default number of test rounds
-
+
 enable_long_context = False
 if "/long_context" in query:
 enable_long_context = True
@@ -2133,7 +2133,7 @@ def manage_models(query: str):
 args = query.strip().split()
 if args and args[0].isdigit():
 test_rounds = int(args[0])
-
+
 render_speed_test_in_terminal(product_mode, test_rounds,enable_long_context=enable_long_context)
 ## To be optimized: fetch detailed test data
 result_manager.add_result(content="models test success",meta={
@@ -2142,7 +2142,7 @@ def manage_models(query: str):
 "query": query
 }
 })
-
+
 elif subcmd == "/add":
 # Support both simplified and legacy formats
 args = query.strip().split(" ")
@@ -2548,12 +2548,12 @@ def auto_command(params,query: str):
 from autocoder.commands.auto_command import CommandAutoTuner, AutoCommandRequest, CommandConfig, MemoryConfig
 args = get_final_config()
 # help(query)
-
+
 # Prepare the request parameters
 request = AutoCommandRequest(
 user_input=query
 )
-
+
 # Initialize the tuner
 llm = get_single_llm(args.chat_model or args.model,product_mode=args.product_mode)
 tuner = CommandAutoTuner(llm,
@@ -2581,7 +2581,7 @@ def auto_command(params,query: str):
 execute_shell_command=execute_shell_command,
 generate_shell_command=generate_shell_command
 ))
-
+
 # Generate suggestions
 response = tuner.analyze(request)
 printer = Printer()
@@ -2593,7 +2593,7 @@ def auto_command(params,query: str):
 border_style="blue",
 padding=(1, 2)
 ))
-
+

 def main():
 from autocoder.rag.variable_holder import VariableHolder
@@ -2606,20 +2606,20 @@ def main():
 VariableHolder.TOKENIZER_MODEL = Tokenizer.from_file(tokenizer_path)
 except FileNotFoundError:
 tokenizer_path = None
-
+
 ARGS = parse_arguments()
-
+
 if ARGS.lite:
 ARGS.product_mode = "lite"
-
+
 if ARGS.pro:
 ARGS.product_mode = "pro"

 if not ARGS.quick:
 initialize_system(ARGS)
-
+
 load_memory()
-
+
 configure(f"product_mode:{ARGS.product_mode}")

 MODES = {
@@ -2680,7 +2680,11 @@ def main():
 human_as_model = memory["conf"].get("human_as_model", "false")
 if mode not in MODES:
 mode = "auto_detect"
-
+pwd = os.getcwd()
+pwd_parts = pwd.split(os.sep)
+if len(pwd_parts) > 3:
+pwd = os.sep.join(pwd_parts[-3:])
+return f"Current Dir: {pwd} \nMode: {MODES[mode]} | Human as Model: {human_as_model} "

 session = PromptSession(
 history=InMemoryHistory(),
@@ -2773,14 +2777,14 @@ def main():

 elif user_input.startswith("/index/build"):
 index_build()
-
+
 elif user_input.startswith("/index/export"):
 export_path = user_input[len("/index/export"):].strip()
 if not export_path:
 print("Please specify the export path")
 else:
 index_export(export_path)
-
+
 elif user_input.startswith("/index/import"):
 import_path = user_input[len("/index/import"):].strip()
 if not import_path:
@@ -2822,7 +2826,7 @@ def main():
 show_help()
 else:
 help(query)
-
+
 elif user_input.startswith("/exclude_dirs"):
 dir_names = user_input[len(
 "/exclude_dirs"):].strip().split(",")
autocoder/common/__init__.py
CHANGED
@@ -359,6 +359,9 @@ class AutoCoderArgs(pydantic.BaseModel):
 data_cells_max_num: Optional[int] = 2000
 generate_times_same_model: Optional[int] = 1
 rank_times_same_model: Optional[int] = 1
+
+# block: rank on the modified code blocks of each file; file: rank on each file's content before and after the change
+rank_strategy: Optional[str] = "file"

 action: List[str] = []
 enable_global_memory: Optional[bool] = True
@@ -374,6 +377,9 @@ class AutoCoderArgs(pydantic.BaseModel):
 conversation_prune_group_size: Optional[int] = 4
 conversation_prune_strategy: Optional[str] = "summarize"

+context_prune_strategy: Optional[str] = "score"
+context_prune: Optional[bool] = True
+
 auto_command_max_iterations: Optional[int] = 10

 skip_commit: Optional[bool] = False
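
Note: the fields above add a candidate-ranking strategy and context-pruning switches to AutoCoderArgs. A minimal sketch of overriding them, assuming the pydantic model can be constructed directly with keyword arguments (illustrative only; the field names and defaults come from the diff):

from autocoder.common import AutoCoderArgs

# Illustrative override of the options introduced in 0.1.264.
args = AutoCoderArgs(
    rank_strategy="block",            # default "file"; "block" ranks on per-file edit blocks instead
    context_prune=True,               # default True; enables pruning of pulled-in file context
    context_prune_strategy="score",   # default "score"
)
print(args.rank_strategy, args.context_prune, args.context_prune_strategy)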
autocoder/common/auto_coder_lang.py
CHANGED

@@ -137,7 +137,9 @@ MESSAGES = {
 "invalid_enum_value": "Value '{{value}}' is not in allowed values ({{allowed}})",
 "no_changes_made": "⚠️ no changes made, the reason may be that the text block generated by the coding function has a problem, so it cannot be merged into the project",
 "conversation_pruning_start": "⚠️ Conversation pruning started, total tokens: {{total_tokens}}, safe zone: {{safe_zone}}",
-"invalid_file_number": "⚠️ Invalid file number {{file_number}}, total files: {{total_files}}"
+"invalid_file_number": "⚠️ Invalid file number {{file_number}}, total files: {{total_files}}",
+"all_merge_results_failed": "⚠️ All merge attempts failed, returning first candidate",
+"only_one_merge_result_success": "✅ Only one merge result succeeded, returning that candidate"
 },
 "zh": {
 "config_validation_error": "配置验证错误: {{error}}",
@@ -272,7 +274,9 @@ MESSAGES = {
 "auto_command_analyzed": "被选择指令",
 "invalid_enum_value": "值 '{{value}}' 不在允许的值列表中 ({{allowed}})",
 "conversation_pruning_start": "⚠️ 对话长度 {{total_tokens}} tokens 超过安全阈值 {{safe_zone}},开始修剪对话。",
-"invalid_file_number": "⚠️ 无效的文件编号 {{file_number}},总文件数为 {{total_files}}"
+"invalid_file_number": "⚠️ 无效的文件编号 {{file_number}},总文件数为 {{total_files}}",
+"all_merge_results_failed": "⚠️ 所有合并尝试都失败,返回第一个候选",
+"only_one_merge_result_success": "✅ 只有一个合并结果成功,返回该候选"
 }}

autocoder/common/code_auto_generate_diff.py
CHANGED

@@ -359,16 +359,16 @@ class CodeAutoGenerateDiff:
 with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
 futures = []
 for llm in self.llms:
+
+model_names_list = llm_utils.get_llm_names(llm)
+model_name = None
+if model_names_list:
+model_name = model_names_list[0]
+
 for _ in range(self.generate_times_same_model):
-
-
-
-model_name = model_names_list[0]
-
-for _ in range(self.generate_times_same_model):
-model_names.append(model_name)
-futures.append(executor.submit(
-chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+model_names.append(model_name)
+futures.append(executor.submit(
+chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))

 temp_results = [future.result() for future in futures]
 for result in temp_results:
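
Note: the hunk above resolves the model name once per LLM before submitting the generate_times_same_model tasks, rather than inside the inner loop. A hedged sketch of the same submission pattern follows; get_llm_names and chat_with_continue stand in for the auto-coder helpers, and their signatures here are assumptions.

from concurrent.futures import ThreadPoolExecutor

# Sketch of the per-LLM fan-out: one name lookup per model, N submissions per model.
def generate_candidates(llms, conversations, times_per_model, get_llm_names, chat_with_continue):
    model_names, futures = [], []
    with ThreadPoolExecutor(max_workers=len(llms) * times_per_model) as executor:
        for llm in llms:
            names = get_llm_names(llm)               # resolved once per LLM (the 0.1.264 change)
            model_name = names[0] if names else None
            for _ in range(times_per_model):
                model_names.append(model_name)
                futures.append(executor.submit(chat_with_continue, llm=llm, conversations=conversations))
        results = [f.result() for f in futures]      # collect results in submission order
    return model_names, results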
autocoder/common/code_auto_merge.py
CHANGED

@@ -73,15 +73,35 @@ class CodeAutoMerge:
 def choose_best_choice(self, generate_result: CodeGenerateResult) -> CodeGenerateResult:
 if len(generate_result.contents) == 1:
 return generate_result
+
+merge_results = []
+for content,conversations in zip(generate_result.contents,generate_result.conversations):
+merge_result = self._merge_code_without_effect(content)
+merge_results.append(merge_result)

+# If all merge results are None, return first one
+if all(len(result.failed_blocks) != 0 for result in merge_results):
+self.printer.print_in_terminal("all_merge_results_failed")
+return CodeGenerateResult(contents=[generate_result.contents[0]], conversations=[generate_result.conversations[0]])
+
+# If only one merge result is not None, return that one
+not_none_indices = [i for i, result in enumerate(merge_results) if len(result.failed_blocks) == 0]
+if len(not_none_indices) == 1:
+idx = not_none_indices[0]
+self.printer.print_in_terminal("only_one_merge_result_success")
+return CodeGenerateResult(contents=[generate_result.contents[idx]], conversations=[generate_result.conversations[idx]])
+
+# Finally, if more than one succeeds, rank them by quality and return
 ranker = CodeModificationRanker(self.llm, self.args)
-ranked_result = ranker.rank_modifications(generate_result)
-
+ranked_result = ranker.rank_modifications(generate_result,merge_results)
+
+## Merge the ranked results once more and return the first that passes; re-merging here is somewhat redundant and inefficient, to be improved later
 for content,conversations in zip(ranked_result.contents,ranked_result.conversations):
 merge_result = self._merge_code_without_effect(content)
 if not merge_result.failed_blocks:
 return CodeGenerateResult(contents=[content], conversations=[conversations])
-
+
+# Final fallback; in practice this should not be reached
 return CodeGenerateResult(contents=[ranked_result.contents[0]], conversations=[ranked_result.conversations[0]])

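
Note: the new choose_best_choice logic dry-merges every candidate first, short-circuits when all candidates fail or exactly one succeeds, and only invokes the ranker when several succeed. A hedged sketch of that decision flow follows; dry_merge, rank, and the result shape are illustrative stand-ins, not the package's real API.

# Illustrative selection flow matching the diff above.
def choose_best(candidates, dry_merge, rank):
    results = [dry_merge(c) for c in candidates]            # merge each candidate without writing to disk
    ok = [i for i, r in enumerate(results) if not r.failed_blocks]
    if not ok:                                              # every dry merge failed: fall back to the first candidate
        return candidates[0]
    if len(ok) == 1:                                        # exactly one clean merge: skip ranking
        return candidates[ok[0]]
    for c in rank(candidates, results):                     # several clean merges: rank, return the first that still merges
        if not dry_merge(c).failed_blocks:
            return c
    return candidates[0]                                    # final fallback, not expected in practice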
autocoder/common/code_auto_merge_diff.py
CHANGED

@@ -387,15 +387,35 @@ class CodeAutoMergeDiff:
 def choose_best_choice(self, generate_result: CodeGenerateResult) -> CodeGenerateResult:
 if len(generate_result.contents) == 1:
 return generate_result
+
+merge_results = []
+for content,conversations in zip(generate_result.contents,generate_result.conversations):
+merge_result = self._merge_code_without_effect(content)
+merge_results.append(merge_result)

+# If all merge results are None, return first one
+if all(len(result.failed_blocks) != 0 for result in merge_results):
+self.printer.print_in_terminal("all_merge_results_failed")
+return CodeGenerateResult(contents=[generate_result.contents[0]], conversations=[generate_result.conversations[0]])
+
+# If only one merge result is not None, return that one
+not_none_indices = [i for i, result in enumerate(merge_results) if len(result.failed_blocks) == 0]
+if len(not_none_indices) == 1:
+idx = not_none_indices[0]
+self.printer.print_in_terminal("only_one_merge_result_success")
+return CodeGenerateResult(contents=[generate_result.contents[idx]], conversations=[generate_result.conversations[idx]])
+
+# Finally, if more than one succeeds, rank them by quality and return
 ranker = CodeModificationRanker(self.llm, self.args)
-ranked_result = ranker.rank_modifications(generate_result)
-
+ranked_result = ranker.rank_modifications(generate_result,merge_results)
+
+## Merge the ranked results once more and return the first that passes; re-merging here is somewhat redundant and inefficient, to be improved later
 for content,conversations in zip(ranked_result.contents,ranked_result.conversations):
 merge_result = self._merge_code_without_effect(content)
 if not merge_result.failed_blocks:
 return CodeGenerateResult(contents=[content], conversations=[conversations])
-
+
+# Final fallback; in practice this should not be reached
 return CodeGenerateResult(contents=[ranked_result.contents[0]], conversations=[ranked_result.conversations[0]])

 @byzerllm.prompt(render="jinja2")
@@ -440,6 +460,11 @@ class CodeAutoMergeDiff:
 errors = []
 for path, hunk in uniq:
 full_path = self.abs_root_path(path)
+
+if not os.path.exists(full_path):
+with open(full_path, "w",encoding="utf-8") as f:
+f.write("")
+
 content = FileUtils.read_file(full_path)

 original, _ = hunk_to_before_after(hunk)