auto-coder 0.1.256__py3-none-any.whl → 0.1.258__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of auto-coder might be problematic.

auto_coder-0.1.256.dist-info/METADATA → auto_coder-0.1.258.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.256
+Version: 0.1.258
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter-client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas] >=0.1.164
+Requires-Dist: byzerllm[saas] >=0.1.165
 Requires-Dist: patch
 Requires-Dist: diff-match-patch
 Requires-Dist: GitPython
auto_coder-0.1.256.dist-info/RECORD → auto_coder-0.1.258.dist-info/RECORD
@@ -1,17 +1,17 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/auto_coder.py,sha256=obRx5lFu8P4M5mmLA717lwMso7Ei3Kx3i9kG7I_xrRY,65654
+autocoder/auto_coder.py,sha256=whonXfuZrcveFliNXLglxo6CXtwzIqo-7QppjJqag0g,64811
 autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
 autocoder/auto_coder_rag.py,sha256=DDAmqw36CO6phtdQuN8LYIbIR3YGdoZw5-pG0LjVxMc,29063
 autocoder/auto_coder_rag_client_mcp.py,sha256=WV7j5JUiQge0x4-B7Hp5-pSAFXLbvLpzQMcCovbauIM,6276
 autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
 autocoder/auto_coder_server.py,sha256=XU9b4SBH7zjPPXaTWWHV4_zJm-XYa6njuLQaplYJH_c,20290
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
-autocoder/chat_auto_coder.py,sha256=a1YEp6OPMzpLbRpr2hrbzF6pRnhVPTVxyZfBiQHFPIw,109283
-autocoder/chat_auto_coder_lang.py,sha256=1cJrjFGrcOQnuP2LdZpgGDSX4CNaIYI7KZGvEEtj6_Q,18242
+autocoder/chat_auto_coder.py,sha256=tfr0qu5yMHZFetL4ef3FvB7d29kjY8qNxW6YKBLXCAA,109659
+autocoder/chat_auto_coder_lang.py,sha256=WxylHYFHqBxM_6YvoqIrPdNQFlobYT1t07xlCISfWJw,18241
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
-autocoder/models.py,sha256=xwWPcegwx945g433UZXna-7HBdnHWCq8oEfHm-HKIDQ,8651
-autocoder/version.py,sha256=Mlq4zYTZeRq2mquyM-8m1qr6sjxAHZpSDVyjrKqhayc,23
+autocoder/models.py,sha256=rG7ckiKlers-XoO1gWxNK-Y-IbqD82WS3qFMPHqvFsc,9072
+autocoder/version.py,sha256=ZqpPzT_HdggyvazgM4cfgBLNgZaYhDcBMd4HUgyfqGc,23
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -25,19 +25,19 @@ autocoder/agent/project_reader.py,sha256=tWLaPoLw1gI6kO_NzivQj28KbobU2ceOLuppHMb
 autocoder/chat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/common/JupyterClient.py,sha256=O-wi6pXeAEYhAY24kDa0BINrLYvKS6rKyWe98pDClS0,2816
 autocoder/common/ShellClient.py,sha256=fM1q8t_XMSbLBl2zkCNC2J9xuyKN3eXzGm6hHhqL2WY,2286
-autocoder/common/__init__.py,sha256=6maackdzrYnUPvpgVPl92JdMOnw7X4n3EnEQA9OnLGE,11984
+autocoder/common/__init__.py,sha256=wJIAB6EOeCmo9UXCGC7E9u_GGWO5DjXW78wUi3hz1lI,12272
 autocoder/common/anything2images.py,sha256=0ILBbWzY02M-CiWB-vzuomb_J1hVdxRcenAfIrAXq9M,25283
 autocoder/common/anything2img.py,sha256=4TREa-sOA-iargieUy7MpyCYVUE-9Mmq0wJtwomPqnE,7662
 autocoder/common/audio.py,sha256=Kn9nWKQddWnUrAz0a_ZUgjcu4VUU_IcZBigT7n3N3qc,7439
-autocoder/common/auto_coder_lang.py,sha256=x9Zjwvu9OZJjTmswwyimlMb1pvngUAF9_3oNQQut2i4,17634
+autocoder/common/auto_coder_lang.py,sha256=xxmO4Htil4S2KvWa3RRi9rMgrod_io1MbctJkRXBfDg,20760
 autocoder/common/buildin_tokenizer.py,sha256=L7d5t39ZFvUd6EoMPXUhYK1toD0FHlRH1jtjKRGokWU,1236
 autocoder/common/chunk_validation.py,sha256=BrR_ZWavW8IANuueEE7hS8NFAwEvm8TX34WnPx_1hs8,3030
 autocoder/common/cleaner.py,sha256=NU72i8C6o9m0vXExab7nao5bstBUsfJFcj11cXa9l4U,1089
 autocoder/common/code_auto_execute.py,sha256=4KXGmiGObr_B1d6tzV9dwS6MifCSc3Gm4j2d6ildBXQ,6867
-autocoder/common/code_auto_generate.py,sha256=E8r3VI88hPBPhU5t56qnmeL_fWtCWX1CJvaOachGa2Y,12014
-autocoder/common/code_auto_generate_diff.py,sha256=dmMgN1yIOjJfiYFnzXZuktVFdj4_XR_Tavwx_ysm53U,17846
-autocoder/common/code_auto_generate_editblock.py,sha256=NI_dFwy1VhvdjvARb04-B1AGfgW9z4P1BfWMm-blnaU,19447
-autocoder/common/code_auto_generate_strict_diff.py,sha256=uf5P5B8ly0MP3jCK2PaYJiPLktd1cRRPouwkkaf-DfY,16457
+autocoder/common/code_auto_generate.py,sha256=74wCscxVEnY_VDkHcr-QA3b79RhDR_OeVPOI7UKRJwA,13040
+autocoder/common/code_auto_generate_diff.py,sha256=bns5KZq9ozvUtyqIUWsDNUtah-TTOsE7yRXHYGlrtT4,18872
+autocoder/common/code_auto_generate_editblock.py,sha256=LcGfG4bJVCVsWehex7MYWDF4NX0B2Rp2ALSh-27MclA,20472
+autocoder/common/code_auto_generate_strict_diff.py,sha256=JvKnD5Ph3JtAiVIO_k_XKUnVBeUxwLw_AHF_xWWtX7c,17488
 autocoder/common/code_auto_merge.py,sha256=-ksBjj4ZVcbY_tVH4JLXAMSRtsgaSxrSZ5-MOl9cAgE,7354
 autocoder/common/code_auto_merge_diff.py,sha256=qpEuHJEgX6sWK7EDFEKqcYkyI28wOyM4pytyl8BLohY,15350
 autocoder/common/code_auto_merge_editblock.py,sha256=sxgYMLMACRwJvw-bABkdDHezPelsDFrOCpGuhtT5Dzs,17504
@@ -76,20 +76,22 @@ autocoder/db/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/db/store.py,sha256=tFT66bP2ZKIqZip-uhLkHRSLaaOAUUDZfozJwcqix3c,1908
 autocoder/dispacher/__init__.py,sha256=YoA64dIxnx4jcE1pwSfg81sjkQtjDkhddkfac1-cMWo,1230
 autocoder/dispacher/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/dispacher/actions/action.py,sha256=sfh3pCasy622Jm0_AIKU7xtR-tqY2tS2_9YJsEd0FJY,22753
+autocoder/dispacher/actions/action.py,sha256=k5ank4nHYQ-Rh6gIeeVgWJL42aNVOtgkDZYSZ0jhhiQ,23502
 autocoder/dispacher/actions/copilot.py,sha256=iMh4ckj9hO5Q-iemF3CStXd7DatWai7Eci5zOlKxK9c,13072
 autocoder/dispacher/actions/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/dispacher/actions/plugins/action_regex_project.py,sha256=22EZL3mLFxgsEZ8ymPCGvaHCJFnrW6C_prp1ykYCuEY,6335
+autocoder/dispacher/actions/plugins/action_regex_project.py,sha256=rKQtRo2icVrBhzkn1HNhch1eozMvvBo8x-_G1sDMIBY,6495
 autocoder/dispacher/actions/plugins/action_translate.py,sha256=nVAtRSQpdGNmZxg1R_9zXG3AuTv3CHf2v7ODgj8u65c,7727
 autocoder/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/index/entry.py,sha256=1KIGPCtxQN0OdErAco9OmGTd5hB8WJTpWGrxsGLsTcE,12634
+autocoder/index/entry.py,sha256=H897HGBsIi9bf0T-M-i3IWQcFG6uFRSjX70eicD5oto,12795
 autocoder/index/for_command.py,sha256=BFvljE4t6VaMBGboZAuhUCzVK0EitCy_n5D_7FEnihw,3204
-autocoder/index/index.py,sha256=hVAIyF10N9hxKMWHA_ibYygGRZYJQZfZxRRrhQhrTvk,21225
+autocoder/index/index.py,sha256=GeofteDTq4Ye0cSBuK1CqQD43NMrrHOg5dfbv_7fVzk,25312
 autocoder/index/symbols_utils.py,sha256=CjcjUVajmJZB75Ty3a7kMv1BZphrm-tIBAdOJv6uo-0,2037
 autocoder/index/types.py,sha256=a2s_KV5FJlq7jqA2ELSo9E1sjuLwDB-JJYMhSpzBAhU,596
 autocoder/index/filter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/index/filter/normal_filter.py,sha256=APu34iSvWhtlLtWgkj8N3Vo4oW1TegtZQq2bwDX_cs4,8031
 autocoder/index/filter/quick_filter.py,sha256=5toipv7XwLsmG_UaqrElpGNjKXq_0bcvFr8W80vT44g,15206
+autocoder/privacy/__init__.py,sha256=LnIVvGu_K66zCE-yhN_-dPO8R80pQyedCsXJ7wRqQaI,72
+autocoder/privacy/model_filter.py,sha256=-N9ZvxxDKpxU7hkn-tKv-QHyXjvkCopUaKgvJwTOGQs,3369
 autocoder/pyproject/__init__.py,sha256=bRuGxFV4QyE85xVjDzeMFmlLVqGbbcFs09FI15Uss4Q,14423
 autocoder/rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/rag/api_server.py,sha256=dRbhAZVRAOlZ64Cnxf4_rKb4iJwHnrWS9Zr67IVORw0,7288
@@ -133,6 +135,7 @@ autocoder/utils/conversation_store.py,sha256=sz-hhY7sttPAUOAQU6Pze-5zJc3j0_Emj22
 autocoder/utils/llm_client_interceptors.py,sha256=FEHNXoFZlCjAHQcjPRyX8FOMjo6rPXpO2AJ2zn2KTTo,901
 autocoder/utils/llms.py,sha256=HM5K_v4AcuWo65lgcp66DEqaU9-fjoT7mcI1iv2Fopg,3839
 autocoder/utils/log_capture.py,sha256=I-bsJFLWoGUiX-GKoZsH9kWJCKSV7ZlUnRt7jh-fOL0,1548
+autocoder/utils/model_provider_selector.py,sha256=g5O9frBWkXR7iqjYDdTvhoxzTQx0NaPVPu9M2ItqhpE,7602
 autocoder/utils/multi_turn.py,sha256=unK9OpqVRbK6uIcTKXgggX2wNmyj7s5eyEAQ2xUwHoM,88
 autocoder/utils/operate_config_api.py,sha256=99YAKsuUFLPwrRvj0CJal_bAPgyiXWMma6ZKMU56thw,5790
 autocoder/utils/print_table.py,sha256=ZMRhCA9DD0FUfKyJBWd5bDdj1RrtPtgOMWSJwtvZcLs,403
@@ -146,9 +149,9 @@ autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=xWXqICANbDOovH4wcFW1eSI7lB7TjXbk1mSU4bTKEW4,11434
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.256.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-auto_coder-0.1.256.dist-info/METADATA,sha256=NrC0Y2oSS6lDTeKbXKk3F1QnZ7_3Kie7pho_i7JhwJk,2616
-auto_coder-0.1.256.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-auto_coder-0.1.256.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
-auto_coder-0.1.256.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
-auto_coder-0.1.256.dist-info/RECORD,,
+auto_coder-0.1.258.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.258.dist-info/METADATA,sha256=t48Rt7ffsIuQKceJzcS7igvP4sDu_40I3DkSG2vYhBE,2616
+auto_coder-0.1.258.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.258.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.258.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.258.dist-info/RECORD,,
autocoder/auto_coder.py CHANGED
@@ -47,6 +47,8 @@ from autocoder.common.utils_code_auto_generate import stream_chat_with_continue
 from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
+from autocoder.privacy.model_filter import ModelPathFilter
+
 console = Console()
 
 
@@ -317,53 +319,7 @@ def main(input_args: Optional[List[str]] = None):
             "saas.model": model_info["model_name"],
             "saas.is_reasoning": model_info["is_reasoning"]
         }
-    )
-
-    if models_module.check_model_exists("deepseek_r1_chat"):
-        r1_model_info = models_module.get_model_by_name("deepseek_r1_chat")
-        api_key = r1_model_info["api_key"]
-        chat_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-        chat_llm.deploy(
-            model_path="",
-            pretrained_model_type="saas/openai",
-            udf_name="deepseek_r1_chat",
-            infer_params={
-                "saas.base_url": "https://api.deepseek.com/v1",
-                "saas.api_key": api_key,
-                "saas.model": "deepseek-reasoner",
-                "saas.is_reasoning": True
-            }
-        )
-
-        generate_rerank_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-        generate_rerank_llm.deploy(
-            model_path="",
-            pretrained_model_type="saas/openai",
-            udf_name="deepseek_r1_chat",
-            infer_params={
-                "saas.base_url": "https://api.deepseek.com/v1",
-                "saas.api_key": api_key,
-                "saas.model": "deepseek-reasoner",
-                "saas.is_reasoning": True
-            }
-        )
-
-        index_filter_llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_r1_chat")
-        index_filter_llm.deploy(
-            model_path="",
-            pretrained_model_type="saas/openai",
-            udf_name="deepseek_r1_chat",
-            infer_params={
-                "saas.base_url": "https://api.deepseek.com/v1",
-                "saas.api_key": api_key,
-                "saas.model": "deepseek-reasoner",
-                "saas.is_reasoning": True
-            }
-        )
-
-        llm.setup_sub_client("chat_model", chat_llm)
-        llm.setup_sub_client("generate_rerank_model", generate_rerank_llm)
-        llm.setup_sub_client("index_filter_model", index_filter_llm)
+    )
 
     if args.product_mode == "lite":
         # Set up default models based on configuration
@@ -947,6 +903,17 @@ def main(input_args: Optional[List[str]] = None):
         # )
         return
     elif raw_args.agent_command == "project_reader":
+
+        target_llm = llm.get_sub_client("planner_model")
+        if not target_llm:
+            target_llm = llm
+        model_filter = ModelPathFilter.from_model_object(target_llm, args)
+        if model_filter.has_rules():
+            printer = Printer()
+            msg = printer.get_message_from_key_with_format("model_has_access_restrictions",
+                                                           model_name=",".join(get_llm_names(target_llm)))
+            raise ValueError(msg)
+
         from autocoder.agent.project_reader import ProjectReader
 
         project_reader = ProjectReader(args, llm)
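
Note: the gate above is self-contained. A minimal sketch of the same pattern, assuming only the methods this hunk itself exercises (get_sub_client, from_model_object, has_rules); the rule format behind model_filter_path is not shown in this diff, and guard_project_reader is a hypothetical name, not part of the package:

    from autocoder.privacy.model_filter import ModelPathFilter
    from autocoder.utils.llms import get_llm_names

    def guard_project_reader(llm, args):
        # Prefer the planner sub-client when one is configured, as in the hunk.
        target_llm = llm.get_sub_client("planner_model") or llm
        model_filter = ModelPathFilter.from_model_object(target_llm, args)
        if model_filter.has_rules():
            # A path-restricted model must not read the whole project.
            raise ValueError(f"{','.join(get_llm_names(target_llm))} has access restrictions")
        return target_llm
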
@@ -1194,10 +1161,23 @@ def main(input_args: Optional[List[str]] = None):
             else:
                 pp = SuffixProject(args=args, llm=llm, file_filter=None)
                 pp.run()
-                sources = pp.sources
-
+            sources = pp.sources
+
+            # Apply model filter for chat_llm
+            model_filter = ModelPathFilter.from_model_object(chat_llm, args)
+            filtered_sources = []
+            printer = Printer()
+            for source in sources:
+                if model_filter.is_accessible(source.module_name):
+                    filtered_sources.append(source)
+                else:
+                    printer.print_in_terminal("index_file_filtered",
+                                              style="yellow",
+                                              file_path=source.module_name,
+                                              model_name=",".join(get_llm_names(chat_llm)))
+
             s = build_index_and_filter_files(
-                llm=llm, args=args, sources=sources)
+                llm=llm, args=args, sources=filtered_sources).to_str()
 
             if s:
                 pre_conversations.append(
autocoder/chat_auto_coder.py CHANGED
@@ -297,23 +297,22 @@ def initialize_system(args):
 
     init_project()
 
-    if args.product_mode == "lite":
-        # Setup deepseek api key
-        api_key_dir = os.path.expanduser("~/.auto-coder/keys")
-        api_key_file = os.path.join(api_key_dir, "api.deepseek.com")
-
-        if not os.path.exists(api_key_file):
-            print_status(get_message("model_not_available"), "warning")
-            api_key = prompt(HTML(f"<b>{get_message('enter_api_key')} </b>"))
-
-            # Create directory if it doesn't exist
-            os.makedirs(api_key_dir, exist_ok=True)
-
-            # Save the API key
-            with open(api_key_file, "w") as f:
-                f.write(api_key)
-
-            print_status(f"API key saved successfully: {api_key_file}", "success")
+    if args.product_mode == "lite":
+        from autocoder.utils.model_provider_selector import ModelProviderSelector
+        from autocoder import models as models_module
+        if not models_module.check_model_exists("v3_chat") or not models_module.check_model_exists("r1_chat"):
+            model_provider_selector = ModelProviderSelector()
+            model_provider_info = model_provider_selector.select_provider()
+            if model_provider_info is not None:
+                models_json_list = model_provider_selector.to_models_json(model_provider_info)
+                models_module.add_and_activate_models(models_json_list)
+                r1_model = models_json_list[0]['name']
+                v3_model = models_json_list[1]['name']
+                configure(f"model:{v3_model}", skip_print=True)
+                configure(f"chat_model:{r1_model}", skip_print=True)
+                configure(f"generate_rerank_model:{r1_model}", skip_print=True)
+                configure(f"code_model:{v3_model}", skip_print=True)
+                configure(f"index_filter_model:{r1_model}", skip_print=True)
 
     if args.product_mode == "pro":
         # Check if Ray is running
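
Note: the lite-mode bootstrap above wires five roles to just two models. A sketch of that wiring, using only what the hunk shows: entries carry a "name" field, the r1 entry sits at index 0 and the v3 entry at index 1, and the names match the check_model_exists calls; any other entry fields are assumptions about the schema:

    models_json_list = [
        {"name": "r1_chat"},  # reasoning model, index 0 in the hunk
        {"name": "v3_chat"},  # chat model, index 1 in the hunk
    ]
    r1_model = models_json_list[0]["name"]
    v3_model = models_json_list[1]["name"]
    role_wiring = {
        "model": v3_model,
        "chat_model": r1_model,
        "generate_rerank_model": r1_model,
        "code_model": v3_model,
        "index_filter_model": r1_model,
    }
    for key, name in role_wiring.items():
        # Stands in for configure(f"{key}:{name}", skip_print=True) in the hunk.
        print(f"{key}:{name}")
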
autocoder/chat_auto_coder_lang.py CHANGED
@@ -128,7 +128,7 @@ MESSAGES = {
         "official_doc": "Official Documentation: https://uelng8wukz.feishu.cn/wiki/NhPNwSRcWimKFIkQINIckloBncI",
     },
     "zh": {
-        "commit_generating": "{{ model_name }} 正在生成提交信息...",
+        "commit_generating": "{{ model_name }} 正在生成提交信息...",
         "commit_message": "{{ model_name }} 生成的提交信息: {{ message }}",
         "commit_failed": "{{ model_name }} 生成提交信息失败: {{ error }}",
         "mcp_remove_error": "移除 MCP 服务器时出错:{error}",
autocoder/common/__init__.py CHANGED
@@ -14,7 +14,13 @@ class SourceCode(pydantic.BaseModel):
     tokens: int = -1
     metadata: Dict[str, Any] = {}
 
+class SourceCodeList():
+    def __init__(self, sources: List[SourceCode]):
+        self.sources = sources
 
+    def to_str(self):
+        return "\n".join([f"##File: {source.module_name}\n{source.source_code}\n" for source in self.sources])
+
 class TranslateReadme(pydantic.BaseModel):
     filename: str = pydantic.Field(..., description="需要翻译的文件路径")
     content: str = pydantic.Field(..., description="翻译后的内容")
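
Note: a minimal usage sketch of the new SourceCodeList container, assuming module_name and source_code are the only required SourceCode fields (tokens and metadata carry defaults in the hunk); the file paths are illustrative:

    from autocoder.common import SourceCode, SourceCodeList

    source_code_list = SourceCodeList([
        SourceCode(module_name="src/a.py", source_code="print('a')"),
        SourceCode(module_name="src/b.py", source_code="print('b')"),
    ])
    # to_str() renders the "##File: <path>" prompt layout the generators consume.
    print(source_code_list.to_str())
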
@@ -362,6 +368,7 @@ class AutoCoderArgs(pydantic.BaseModel):
     keep_only_reasoning_content: Optional[bool] = False
 
     in_code_apply: bool = False
+    model_filter_path: Optional[str] = None
 
     class Config:
         protected_namespaces = ()
autocoder/common/auto_coder_lang.py CHANGED
@@ -3,6 +3,20 @@ from byzerllm.utils import format_str_jinja2
 
 MESSAGES = {
     "en": {
+        "model_provider_select_title": "Select Model Provider",
+        "model_provider_select_text": "Please select your model provider:",
+        "model_provider_volcano": "Volcano Engine",
+        "model_provider_siliconflow": "SiliconFlow AI",
+        "model_provider_deepseek": "DeepSeek Official",
+        "model_provider_api_key_title": "API Key",
+        "model_provider_volcano_api_key_text": "Please enter your Volcano Engine API key:",
+        "model_provider_volcano_r1_text": "Please enter your Volcano Engine R1 endpoint (format: ep-20250204215011-vzbsg):",
+        "model_provider_volcano_v3_text": "Please enter your Volcano Engine V3 endpoint (format: ep-20250204215011-vzbsg):",
+        "model_provider_siliconflow_api_key_text": "Please enter your SiliconFlow AI API key:",
+        "model_provider_deepseek_api_key_text": "Please enter your DeepSeek API key:",
+        "model_provider_selected": "Provider configuration completed successfully! You can use /models command to view, add and modify all models later.",
+        "model_provider_success_title": "Success",
+        "index_file_filtered": "File {{file_path}} is filtered by model {{model_name}} restrictions",
         "models_no_active": "No active models found",
         "models_speed_test_results": "Model Speed Test Results",
         "models_testing": "Testing model: {{name}}...",
@@ -11,7 +25,7 @@ MESSAGES = {
         "generation_cancelled": "[Interrupted] Generation cancelled",
         "model_not_found": "Model {{model_name}} not found",
         "generating_shell_script": "Generating Shell Script",
-        "new_session_started": "New session started. Previous chat history has been archived.",
+        "new_session_started": "New session started. Previous chat history has been archived.",
         "memory_save_success": "✅ Saved to your memory",
         "file_decode_error": "Failed to decode file: {{file_path}}. Tried encodings: {{encodings}}",
         "file_write_error": "Failed to write file: {{file_path}}. Error: {{error}}",
@@ -21,7 +35,7 @@ MESSAGES = {
         "no_latest_commit": "Unable to get latest commit information",
         "code_review_error": "Code review process error: {{error}}",
         "index_file_too_large": "⚠️ File {{ file_path }} is too large ({{ file_size }} > {{ max_length }}), splitting into chunks...",
-        "index_update_success": "✅ {{ model_name }} Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s",
+        "index_update_success": "✅ {{ model_name }} Successfully updated index for {{ file_path }} (md5: {{ md5 }}) in {{ duration }}s, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}",
         "index_build_error": "❌ {{ model_name }} Error building index for {{ file_path }}: {{ error }}",
         "index_build_summary": "📊 Total Files: {{ total_files }}, Need to Build Index: {{ num_files }}",
         "building_index_progress": "⏳ Building Index: {{ counter }}/{{ num_files }}...",
@@ -30,7 +44,7 @@ MESSAGES = {
         "index_threads_completed": "✅ Completed {{ completed_threads }}/{{ total_threads }} threads",
         "index_related_files_fail": "⚠️ Failed to find related files for chunk {{ chunk_count }}",
         "index_file_removed": "🗑️ Removed non-existent file index: {{ file_path }}",
-        "index_file_saved": "💾 Saved index file, updated {{ updated_files }} files, removed {{ removed_files }} files",
+        "index_file_saved": "💾 Saved index file, updated {{ updated_files }} files, removed {{ removed_files }} files, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}",
         "human_as_model_instructions": (
             "You are now in Human as Model mode. The content has been copied to your clipboard.\n"
             "The system is waiting for your input. When finished, enter 'EOF' on a new line to submit.\n"
@@ -81,7 +95,7 @@ MESSAGES = {
         "begin_index_source_code": "🚀 Begin to index source code in {{ source_dir }}",
         "stream_out_stats": "Model: {{ model_name }}, Total time: {{ elapsed_time }} seconds, First token time: {{ first_token_time }} seconds, Speed: {{ speed }} tokens/s, Input tokens: {{ input_tokens }}, Output tokens: {{ output_tokens }}, Input cost: {{ input_cost }}, Output cost: {{ output_cost }}",
         "quick_filter_stats": "{{ model_names }} 快速过滤器完成,耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
-        "upsert_file": "✅ Updated file: {{ file_path }}",
+        "upsert_file": "✅ Updated file: {{ file_path }}",
         "unmerged_blocks_title": "Unmerged Blocks",
         "quick_filter_title": "{{ model_name }} is analyzing how to filter context...",
         "quick_filter_failed": "❌ Quick filter failed: {{ error }}. ",
@@ -95,8 +109,23 @@ MESSAGES = {
         "quick_filter_tokens_len": "📊 Current index size: {{ tokens_len }} tokens",
         "estimated_chat_input_tokens": "Estimated chat input tokens: {{ estimated_input_tokens }}",
         "estimated_input_tokens_in_generate": "Estimated input tokens in generate ({{ generate_mode }}): {{ estimated_input_tokens }}",
+        "model_has_access_restrictions": "{{model_name}} has access restrictions, cannot use the current function",
     },
     "zh": {
+        "model_provider_select_title": "选择模型供应商",
+        "model_provider_select_text": "请选择您的模型供应商:",
+        "model_provider_volcano": "火山方舟",
+        "model_provider_siliconflow": "硅基流动",
+        "model_provider_deepseek": "DeepSeek官方",
+        "model_provider_api_key_title": "API密钥",
+        "model_provider_volcano_api_key_text": "请输入您的火山方舟API密钥:",
+        "model_provider_volcano_r1_text": "请输入您的火山方舟 R1 推理点(格式如: ep-20250204215011-vzbsg):",
+        "model_provider_volcano_v3_text": "请输入您的火山方舟 V3 推理点(格式如: ep-20250204215011-vzbsg):",
+        "model_provider_siliconflow_api_key_text": "请输入您的硅基流动API密钥:",
+        "model_provider_deepseek_api_key_text": "请输入您的DeepSeek API密钥:",
+        "model_provider_selected": "供应商配置已成功完成!后续你可以使用 /models 命令,查看,新增和修改所有模型",
+        "model_provider_success_title": "成功",
+        "index_file_filtered": "文件 {{file_path}} 被模型 {{model_name}} 的访问限制过滤",
         "models_no_active": "未找到激活的模型",
         "models_speed_test_results": "模型速度测试结果",
         "models_testing": "正在测试模型: {{name}}...",
@@ -114,7 +143,7 @@ MESSAGES = {
         "no_latest_commit": "无法获取最新的提交信息",
         "code_review_error": "代码审查过程出错: {{error}}",
         "index_file_too_large": "⚠️ 文件 {{ file_path }} 过大 ({{ file_size }} > {{ max_length }}), 正在分块处理...",
-        "index_update_success": "✅ {{ model_name }} 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} ",
+        "index_update_success": "✅ {{ model_name }} 成功更新 {{ file_path }} 的索引 (md5: {{ md5 }}), 耗时 {{ duration }} 秒, 输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
         "index_build_error": "❌ {{ model_name }} 构建 {{ file_path }} 索引时出错: {{ error }}",
         "index_build_summary": "📊 总文件数: {{ total_files }}, 需要构建索引: {{ num_files }}",
         "building_index_progress": "⏳ 正在构建索引: {{ counter }}/{{ num_files }}...",
@@ -123,7 +152,7 @@ MESSAGES = {
         "index_threads_completed": "✅ 已完成 {{ completed_threads }}/{{ total_threads }} 个线程",
         "index_related_files_fail": "⚠️ 无法为块 {{ chunk_count }} 找到相关文件",
         "index_file_removed": "🗑️ 已移除不存在的文件索引:{{ file_path }}",
-        "index_file_saved": "💾 已保存索引文件,更新了 {{ updated_files }} 个文件,移除了 {{ removed_files }} 个文件",
+        "index_file_saved": "💾 已保存索引文件,更新了 {{ updated_files }} 个文件,移除了 {{ removed_files }} 个文件,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
         "human_as_model_instructions": (
             "您现在处于人类作为模型模式。内容已复制到您的剪贴板。\n"
             "系统正在等待您的输入。完成后,在新行输入'EOF'提交。\n"
@@ -188,8 +217,8 @@ MESSAGES = {
         "quick_filter_failed": "❌ 快速过滤器失败: {{ error }}. ",
         "estimated_chat_input_tokens": "对话输入token预估为: {{ estimated_input_tokens }}",
         "estimated_input_tokens_in_generate": "生成代码({{ generate_mode }})预计输入token数: {{ estimated_input_tokens_in_generate }}",
-    },
-}
+        "model_has_access_restrictions": "{{model_name}} 有访问限制,无法使用当前功能",
+    }}
 
 
 def get_system_language():
@@ -203,5 +232,6 @@ def get_message(key):
     lang = get_system_language()
     return MESSAGES.get(lang, MESSAGES['en']).get(key, MESSAGES['en'][key])
 
+
 def get_message_with_format(msg_key: str, **kwargs):
     return format_str_jinja2(get_message(msg_key), **kwargs)
autocoder/common/code_auto_generate.py CHANGED
@@ -11,6 +11,8 @@ import json
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
+from autocoder.common import SourceCodeList
+from autocoder.privacy.model_filter import ModelPathFilter
 
 
 class CodeAutoGenerate:
@@ -156,10 +158,27 @@ class CodeAutoGenerate:
         }
 
     def single_round_run(
-        self, query: str, source_content: str
+        self, query: str, source_code_list: SourceCodeList
     ) -> Tuple[List[str], Dict[str, str]]:
         llm_config = {"human_as_model": self.args.human_as_model}
 
+        # Apply model filter for code_llm
+        printer = Printer()
+        for llm in self.llms:
+            model_filter = ModelPathFilter.from_model_object(llm, self.args)
+            filtered_sources = []
+            for source in source_code_list.sources:
+                if model_filter.is_accessible(source.module_name):
+                    filtered_sources.append(source)
+                else:
+                    printer.print_in_terminal("index_file_filtered",
+                                              style="yellow",
+                                              file_path=source.module_name,
+                                              model_name=",".join(llm_utils.get_llm_names(llm)))
+
+        source_code_list = SourceCodeList(filtered_sources)
+        source_content = source_code_list.to_str()
+
         if self.args.request_id and not self.args.skip_events:
             queue_communicate.send_event_no_wait(
                 request_id=self.args.request_id,
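
Note: the filter-and-rebuild block above reappears nearly verbatim in the diff, editblock, and strict-diff generators below. A hypothetical helper, not part of the package, equivalent to one pass of that loop; it uses only calls the diff itself shows (from_model_object, is_accessible, print_in_terminal, get_llm_names):

    from autocoder.common import SourceCodeList
    from autocoder.privacy.model_filter import ModelPathFilter
    from autocoder.utils import llms as llm_utils

    def filter_sources_for_llm(llm, args, source_code_list, printer):
        model_filter = ModelPathFilter.from_model_object(llm, args)
        kept = []
        for source in source_code_list.sources:
            if model_filter.is_accessible(source.module_name):
                kept.append(source)
            else:
                printer.print_in_terminal("index_file_filtered",
                                          style="yellow",
                                          file_path=source.module_name,
                                          model_name=",".join(llm_utils.get_llm_names(llm)))
        return SourceCodeList(kept)
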
@@ -262,10 +281,11 @@ class CodeAutoGenerate:
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
 
     def multi_round_run(
-        self, query: str, source_content: str, max_steps: int = 10
+        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
     ) -> Tuple[List[str], List[Dict[str, str]]]:
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
+        source_content = source_code_list.to_str()
 
         if self.args.template == "common":
             init_prompt = self.multi_round_instruction.prompt(
autocoder/common/code_auto_generate_diff.py CHANGED
@@ -2,6 +2,7 @@ from typing import List, Dict, Tuple
 from autocoder.common.types import Mode, CodeGenerateResult
 from autocoder.common import AutoCoderArgs
 import byzerllm
+from autocoder.privacy.model_filter import ModelPathFilter
 from autocoder.utils.queue_communicate import queue_communicate, CommunicateEvent, CommunicateEventType
 from autocoder.common import sys_prompt
 from concurrent.futures import ThreadPoolExecutor
@@ -10,7 +11,7 @@ from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
-
+from autocoder.common import SourceCodeList
 
 class CodeAutoGenerateDiff:
     def __init__(
@@ -302,9 +303,10 @@ class CodeAutoGenerateDiff:
         }
 
     def single_round_run(
-        self, query: str, source_content: str
+        self, query: str, source_code_list: SourceCodeList
     ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
+        source_content = source_code_list.to_str()
 
         if self.args.template == "common":
             init_prompt = self.single_round_instruction.prompt(
@@ -410,11 +412,28 @@ class CodeAutoGenerateDiff:
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
 
     def multi_round_run(
-        self, query: str, source_content: str, max_steps: int = 10
+        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
     ) -> CodeGenerateResult:
+
+        # Apply model filter for code_llm
+        printer = Printer()
+        for llm in self.llms:
+            model_filter = ModelPathFilter.from_model_object(llm, self.args)
+            filtered_sources = []
+            for source in source_code_list.sources:
+                if model_filter.is_accessible(source.module_name):
+                    filtered_sources.append(source)
+                else:
+                    printer.print_in_terminal("index_file_filtered",
+                                              style="yellow",
+                                              file_path=source.path,
+                                              model_name=",".join(llm_utils.get_llm_names(llm)))
+
+        source_code_list = SourceCodeList(filtered_sources)
+
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
-
+        source_content = source_code_list.to_str()
         if self.args.template == "common":
             init_prompt = self.multi_round_instruction.prompt(
                 instruction=query, content=source_content, context=self.args.context
autocoder/common/code_auto_generate_editblock.py CHANGED
@@ -3,6 +3,7 @@ from autocoder.common.types import Mode, CodeGenerateResult
 from autocoder.common import AutoCoderArgs
 import byzerllm
 from autocoder.common import sys_prompt
+from autocoder.privacy.model_filter import ModelPathFilter
 from autocoder.utils.queue_communicate import (
     queue_communicate,
     CommunicateEvent,
@@ -14,6 +15,7 @@ from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
+from autocoder.common import SourceCodeList
 
 
 class CodeAutoGenerateEditBlock:
@@ -384,10 +386,29 @@ class CodeAutoGenerateEditBlock:
         }
 
     def single_round_run(
-        self, query: str, source_content: str
+        self, query: str, source_code_list: SourceCodeList
     ) -> CodeGenerateResult:
+
+        # Apply model filter for code_llm
+        printer = Printer()
+        for llm in self.llms:
+            model_filter = ModelPathFilter.from_model_object(llm, self.args)
+            filtered_sources = []
+            for source in source_code_list.sources:
+                if model_filter.is_accessible(source.module_name):
+                    filtered_sources.append(source)
+                else:
+                    printer.print_in_terminal("index_file_filtered",
+                                              style="yellow",
+                                              file_path=source.module_name,
+                                              model_name=",".join(llm_utils.get_llm_names(llm)))
+
+        source_code_list = SourceCodeList(filtered_sources)
+
         llm_config = {"human_as_model": self.args.human_as_model}
 
+        source_content = source_code_list.to_str()
+
         if self.args.template == "common":
             init_prompt = self.single_round_instruction.prompt(
                 instruction=query, content=source_content, context=self.args.context
@@ -498,10 +519,11 @@ class CodeAutoGenerateEditBlock:
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
 
     def multi_round_run(
-        self, query: str, source_content: str, max_steps: int = 10
+        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
     ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
+        source_content = source_code_list.to_str()
 
         if self.args.template == "common":
             init_prompt = self.multi_round_instruction.prompt(
autocoder/common/code_auto_generate_strict_diff.py CHANGED
@@ -10,7 +10,8 @@ from autocoder.common.utils_code_auto_generate import chat_with_continue
 from autocoder.common.printer import Printer
 from autocoder.rag.token_counter import count_tokens
 from autocoder.utils import llms as llm_utils
-
+from autocoder.common import SourceCodeList
+from autocoder.privacy.model_filter import ModelPathFilter
 class CodeAutoGenerateStrictDiff:
     def __init__(
         self, llm: byzerllm.ByzerLLM, args: AutoCoderArgs, action=None
@@ -272,9 +273,10 @@ class CodeAutoGenerateStrictDiff:
         }
 
     def single_round_run(
-        self, query: str, source_content: str
+        self, query: str, source_code_list: SourceCodeList
     ) -> CodeGenerateResult:
         llm_config = {"human_as_model": self.args.human_as_model}
+        source_content = source_code_list.to_str()
 
         if self.args.template == "common":
             init_prompt = self.single_round_instruction.prompt(
@@ -379,10 +381,28 @@ class CodeAutoGenerateStrictDiff:
         return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)
 
     def multi_round_run(
-        self, query: str, source_content: str, max_steps: int = 10
+        self, query: str, source_code_list: SourceCodeList, max_steps: int = 10
     ) -> CodeGenerateResult:
+
+        # Apply model filter for code_llm
+        printer = Printer()
+        for llm in self.llms:
+            model_filter = ModelPathFilter.from_model_object(llm, self.args)
+            filtered_sources = []
+            for source in source_code_list.sources:
+                if model_filter.is_accessible(source.module_name):
+                    filtered_sources.append(source)
+                else:
+                    printer.print_in_terminal("index_file_filtered",
+                                              style="yellow",
+                                              file_path=source.module_name,
+                                              model_name=",".join(llm_utils.get_llm_names(llm)))
+
+        source_code_list = SourceCodeList(filtered_sources)
+
         llm_config = {"human_as_model": self.args.human_as_model}
         result = []
+        source_content = source_code_list.to_str()
 
         if self.args.template == "common":
             init_prompt = self.multi_round_instruction.prompt(