auto-coder 0.1.258-py3-none-any.whl → 0.1.259-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder might be problematic.
- {auto_coder-0.1.258.dist-info → auto_coder-0.1.259.dist-info}/METADATA +1 -1
- {auto_coder-0.1.258.dist-info → auto_coder-0.1.259.dist-info}/RECORD +14 -14
- autocoder/auto_coder.py +1 -22
- autocoder/auto_coder_rag.py +7 -7
- autocoder/auto_coder_rag_client_mcp.py +1 -1
- autocoder/chat_auto_coder.py +144 -124
- autocoder/common/command_templates.py +7 -3
- autocoder/index/entry.py +1 -1
- autocoder/rag/raw_rag.py +1 -1
- autocoder/version.py +1 -1
- {auto_coder-0.1.258.dist-info → auto_coder-0.1.259.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.258.dist-info → auto_coder-0.1.259.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.258.dist-info → auto_coder-0.1.259.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.258.dist-info → auto_coder-0.1.259.dist-info}/top_level.txt +0 -0
{auto_coder-0.1.258.dist-info → auto_coder-0.1.259.dist-info}/RECORD
CHANGED

@@ -1,17 +1,17 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/auto_coder.py,sha256=
+autocoder/auto_coder.py,sha256=_fWeOxGHcRXMMXlzZqo9f0j_GuzNweq68I7DkgRnQoM,63751
 autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
-autocoder/auto_coder_rag.py,sha256=
-autocoder/auto_coder_rag_client_mcp.py,sha256=
+autocoder/auto_coder_rag.py,sha256=nwgsXO2-scssWStjX3S910tDp-OZXZRddSYrpyC4Nq0,29021
+autocoder/auto_coder_rag_client_mcp.py,sha256=QRxUbjc6A8UmDMQ8lXgZkjgqtq3lgKYeatJbDY6rSo0,6270
 autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
 autocoder/auto_coder_server.py,sha256=XU9b4SBH7zjPPXaTWWHV4_zJm-XYa6njuLQaplYJH_c,20290
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
-autocoder/chat_auto_coder.py,sha256=
+autocoder/chat_auto_coder.py,sha256=327wiq2RMXPyiAe3m4Yh3JdnDy2G1AHmyqAFWwBRPrY,110835
 autocoder/chat_auto_coder_lang.py,sha256=WxylHYFHqBxM_6YvoqIrPdNQFlobYT1t07xlCISfWJw,18241
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
 autocoder/models.py,sha256=rG7ckiKlers-XoO1gWxNK-Y-IbqD82WS3qFMPHqvFsc,9072
-autocoder/version.py,sha256=
+autocoder/version.py,sha256=w4YfV0iY2RJqPATbfwM5loNkK_-swqYe1AMQy-1r1No,23
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -45,7 +45,7 @@ autocoder/common/code_auto_merge_strict_diff.py,sha256=P0nKNkBrFMybTSZ7kOdA_Jixo
 autocoder/common/code_modification_ranker.py,sha256=oG9rCekGsYwE9gNdkIKQ6uKt6uaXpwrC17-FV5Wo-fQ,8187
 autocoder/common/command_completer.py,sha256=IShrZJSpR-Q_MCj_aCVdVyscLYDKj5ZQK357QBcQ_oQ,9420
 autocoder/common/command_generator.py,sha256=-hmbD_AnCa5HxL4BznuEfYAf_l8AxU5fAG5F0sM_fuE,2116
-autocoder/common/command_templates.py,sha256=
+autocoder/common/command_templates.py,sha256=lAdr0-iyJKY2dOH2mZ0Tm3GlT_a1Oj8mgdKXmDiQN3A,8654
 autocoder/common/const.py,sha256=eTjhjh4Aj4CUzviJ81jaf3Y5cwqsLATySn2wJxaS6RQ,2911
 autocoder/common/files.py,sha256=CguxG9digkWBJpRaILErZmL_G5ryPRahPmPFWGB7X18,1973
 autocoder/common/git_utils.py,sha256=zxgQt2PukabV_21podylAUzTY7Xk60bsQ7MQYw4s-Tg,23234
@@ -82,7 +82,7 @@ autocoder/dispacher/actions/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
 autocoder/dispacher/actions/plugins/action_regex_project.py,sha256=rKQtRo2icVrBhzkn1HNhch1eozMvvBo8x-_G1sDMIBY,6495
 autocoder/dispacher/actions/plugins/action_translate.py,sha256=nVAtRSQpdGNmZxg1R_9zXG3AuTv3CHf2v7ODgj8u65c,7727
 autocoder/index/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/index/entry.py,sha256=
+autocoder/index/entry.py,sha256=iuvORjbmkNXBaN8p-njwETCVN2mWwu_DIgCJeM2rzKs,12799
 autocoder/index/for_command.py,sha256=BFvljE4t6VaMBGboZAuhUCzVK0EitCy_n5D_7FEnihw,3204
 autocoder/index/index.py,sha256=GeofteDTq4Ye0cSBuK1CqQD43NMrrHOg5dfbv_7fVzk,25312
 autocoder/index/symbols_utils.py,sha256=CjcjUVajmJZB75Ty3a7kMv1BZphrm-tIBAdOJv6uo-0,2037
@@ -101,7 +101,7 @@ autocoder/rag/llm_wrapper.py,sha256=sbDxCANiZyWb_ocqNgqu2oy3c2t8orPNRGleEs-Uwl8,
 autocoder/rag/long_context_rag.py,sha256=wmNmGsXN8RAFl6e9HaVzlwISXV9D-3bvf2qiaWjRy7w,24646
 autocoder/rag/rag_config.py,sha256=8LwFcTd8OJWWwi1_WY4IzjqgtT6RyE2j4PjxS5cCTDE,802
 autocoder/rag/rag_entry.py,sha256=6TKtErZ0Us9XSV6HgRKXA6yR3SiZGPHpynOKSaR1wgE,2463
-autocoder/rag/raw_rag.py,sha256=
+autocoder/rag/raw_rag.py,sha256=BOr0YGf3umjqXOIDVO1LXQ0bIHx8hzBdiubND2ezyxc,2946
 autocoder/rag/relevant_utils.py,sha256=OGfp98OXG4jr3jNmtHIeXGPF8mOlIbTnolPIVTZzYZU,929
 autocoder/rag/simple_directory_reader.py,sha256=LkKreCkNdEOoL4fNhc3_hDoyyWTQUte4uqextISRz4U,24485
 autocoder/rag/simple_rag.py,sha256=I902EUqOK1WM0Y2WFd7RzDJYofElvTZNLVCBtX5A9rc,14885
@@ -149,9 +149,9 @@ autocoder/utils/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=xWXqICANbDOovH4wcFW1eSI7lB7TjXbk1mSU4bTKEW4,11434
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
+auto_coder-0.1.259.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.259.dist-info/METADATA,sha256=rL9SsZMRosv39MX73HxlLhTmYqStFnUl92EUQkv6uCo,2616
+auto_coder-0.1.259.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.259.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.259.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.259.dist-info/RECORD,,
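Each RECORD line pairs a file path with `sha256=<urlsafe-base64 digest, no padding>` and a byte size, which is why every content change above also flips the corresponding RECORD entry. A minimal sketch of recomputing an entry (the helper name is ours; the format follows the standard wheel RECORD convention):

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    data = Path(path).read_bytes()
    digest = hashlib.sha256(data).digest()
    # RECORD uses urlsafe base64 with the trailing "=" padding stripped
    b64 = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return f"{path},sha256={b64},{len(data)}"

For example, record_entry("autocoder/version.py") run against the 0.1.259 wheel contents should reproduce the version.py line above.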
autocoder/auto_coder.py
CHANGED

@@ -282,28 +282,7 @@ def main(input_args: Optional[List[str]] = None):
         )
         byzerllm.connect_cluster(address=args.ray_address)

-        llm = byzerllm.ByzerLLM(verbose=args.print_request)
-
-        # code_model,index_filter_model,generate_rerank_model,chat_model
-        # 这四个模型如果用户没有设置,就会使用默认的
-        # 如果用户随便填写 deepseek 官方key,就会导致 Authentic(No User) 的错误
-        # 或者 Insuffient Balance 之类的错误
-
-        code_model = byzerllm.ByzerLLM()
-        code_model.setup_default_model_name(args.model)
-        llm.setup_sub_client("code_model", code_model)
-
-        index_filter_model = byzerllm.ByzerLLM()
-        index_filter_model.setup_default_model_name("deepseek_r1_chat")
-        llm.setup_sub_client("index_filter_model", index_filter_model)
-
-        generate_rerank_model = byzerllm.ByzerLLM()
-        generate_rerank_model.setup_default_model_name("deepseek_r1_chat")
-        llm.setup_sub_client("generate_rerank_model", generate_rerank_model)
-
-        chat_model = byzerllm.ByzerLLM()
-        chat_model.setup_default_model_name("deepseek_r1_chat")
-        llm.setup_sub_client("chat_model", chat_model)
+        llm = byzerllm.ByzerLLM(verbose=args.print_request)

     if args.product_mode == "lite":
         default_model = args.model
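For context, the removed block hard-wired four role-specific sub-clients onto the main client. A condensed sketch of that pattern, using only the byzerllm calls visible in the removed lines (the model names are illustrative, taken from the names this release favors elsewhere in the diff):

import byzerllm

llm = byzerllm.ByzerLLM()

# One sub-client per role; setup_default_model_name and setup_sub_client
# are the calls the removed block used.
for role, model_name in [
    ("code_model", "v3_chat"),
    ("index_filter_model", "r1_chat"),
    ("generate_rerank_model", "r1_chat"),
    ("chat_model", "r1_chat"),
]:
    sub = byzerllm.ByzerLLM()
    sub.setup_default_model_name(model_name)
    llm.setup_sub_client(role, sub)

0.1.259 drops this wiring in favor of the configure("model:v3_chat", ...) calls added in chat_auto_coder.py below.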
autocoder/auto_coder_rag.py
CHANGED

@@ -66,7 +66,7 @@ def initialize_system(args):
     print_status(get_message("checking_model"), "")
     try:
         result = subprocess.run(
-            ["easy-byzerllm", "chat", "
+            ["easy-byzerllm", "chat", "v3_chat", "你好"],
             capture_output=True,
             text=True,
             timeout=30,
@@ -115,7 +115,7 @@ def initialize_system(args):
             "--infer_params",
             f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
             "--model",
-            "
+            "v3_chat",
         ]

         try:
@@ -129,7 +129,7 @@ def initialize_system(args):
     print_status(get_message("validating_deploy"), "")
     try:
         validation_result = subprocess.run(
-            ["easy-byzerllm", "chat", "
+            ["easy-byzerllm", "chat", "v3_chat", "你好"],
             capture_output=True,
             text=True,
             timeout=30,
@@ -139,7 +139,7 @@ def initialize_system(args):
     except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
         print_status(get_message("validation_fail"), "error")
         print_status(get_message("manual_start"), "warning")
-        print_status("easy-byzerllm chat
+        print_status("easy-byzerllm chat v3_chat 你好", "")

     print_status(get_message("init_complete_final"), "success")

@@ -168,7 +168,7 @@ def main(input_args: Optional[List[str]] = None):
     )
     build_index_parser.add_argument("--file", default="", help=desc["file"])
     build_index_parser.add_argument(
-        "--model", default="
+        "--model", default="v3_chat", help=desc["model"]
     )
     build_index_parser.add_argument(
         "--index_model", default="", help=desc["index_model"]
@@ -199,7 +199,7 @@ def main(input_args: Optional[List[str]] = None):
         "--quick", action="store_true", help="Skip system initialization"
     )
     serve_parser.add_argument("--file", default="", help=desc["file"])
-    serve_parser.add_argument("--model", default="
+    serve_parser.add_argument("--model", default="v3_chat", help=desc["model"])
     serve_parser.add_argument("--index_model", default="", help=desc["index_model"])
     serve_parser.add_argument("--emb_model", default="", help=desc["emb_model"])
     serve_parser.add_argument("--ray_address", default="auto", help=desc["ray_address"])
@@ -366,7 +366,7 @@ def main(input_args: Optional[List[str]] = None):
         "benchmark", help="Benchmark LLM client performance"
     )
     benchmark_parser.add_argument(
-        "--model", default="
+        "--model", default="v3_chat", help="Model to benchmark"
     )
     benchmark_parser.add_argument(
         "--parallel", type=int, default=10, help="Number of parallel requests"

autocoder/auto_coder_rag_client_mcp.py
CHANGED

@@ -147,7 +147,7 @@ def parse_args(input_args: Optional[List[str]] = None) -> AutoCoderArgs:
     parser = argparse.ArgumentParser(description="Auto Coder RAG Client MCP Server")
     parser.add_argument("--rag_url", required=True, help="RAG server URL")
     parser.add_argument("--rag_token", required=True, help="RAG server token")
-    parser.add_argument("--model", default="
+    parser.add_argument("--model", default="v3_chat", help=desc["model"])
    parser.add_argument("--rag_params_max_tokens", type=int, default=4096, help="Max tokens for RAG response")

     args = parser.parse_args(input_args)
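The recurring pattern in these hunks is a CLI smoke test: shell out to easy-byzerllm with a short prompt and a timeout, and treat exit code 0 as "model reachable". A generic sketch of the same check (the function name is ours; the command, model name, and timeout come from the diff):

import subprocess

def model_available(model: str = "v3_chat") -> bool:
    try:
        result = subprocess.run(
            ["easy-byzerllm", "chat", model, "你好"],  # one-shot smoke test
            capture_output=True,
            text=True,
            timeout=30,  # bound the wait so initialization cannot hang
        )
        return result.returncode == 0
    except (subprocess.TimeoutExpired, FileNotFoundError):
        # timed out, or the easy-byzerllm CLI is not installed
        return False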
autocoder/chat_auto_coder.py
CHANGED

@@ -74,6 +74,12 @@ def parse_arguments():
         help="Enter the auto-coder.chat without initializing the system",
     )

+    parser.add_argument(
+        "--skip_provider_selection",
+        action="store_true",
+        help="Skip the provider selection",
+    )
+
     parser.add_argument(
         "--product_mode",
         type=str,
@@ -252,7 +258,12 @@ def configure_project_type():


 def initialize_system(args):
+    from autocoder.utils.model_provider_selector import ModelProviderSelector
+    from autocoder import models as models_module
     print(f"\n\033[1;34m{get_message('initializing')}\033[0m")
+
+    first_time = [False]
+    configure_success = [False]

     def print_status(message, status):
         if status == "success":
@@ -264,10 +275,9 @@ def initialize_system(args):
         else:
             print(f"  {message}")

-    def init_project():
-        first_time = False
+    def init_project():
         if not os.path.exists(".auto-coder"):
-            first_time = True
+            first_time[0] = True
             print_status(get_message("not_initialized"), "warning")
             init_choice = input(
                 f"  {get_message('init_prompt')}").strip().lower()
@@ -290,140 +300,150 @@ def initialize_system(args):
             print_status(get_message("created_dir").format(
                 base_persist_dir), "success")

-        if first_time:
+        if first_time[0]:
             configure_project_type()
+            configure_success[0] = True

         print_status(get_message("init_complete"), "success")

     init_project()

-    if args.
+    if not args.skip_provider_selection and first_time[0]:
+        if args.product_mode == "lite":
+            ## 如果已经是配置过的项目,就无需再选择
+            if first_time[0]:
+                if not models_module.check_model_exists("v3_chat") or not models_module.check_model_exists("r1_chat"):
+                    model_provider_selector = ModelProviderSelector()
+                    model_provider_info = model_provider_selector.select_provider()
+                    if model_provider_info is not None:
+                        models_json_list = model_provider_selector.to_models_json(model_provider_info)
+                        models_module.add_and_activate_models(models_json_list)
+
+        if args.product_mode == "pro":
+            # Check if Ray is running
+            print_status(get_message("checking_ray"), "")
+            ray_status = subprocess.run(
+                ["ray", "status"], capture_output=True, text=True)
+            if ray_status.returncode != 0:
+                print_status(get_message("ray_not_running"), "warning")
+                try:
+                    subprocess.run(["ray", "start", "--head"], check=True)
+                    print_status(get_message("ray_start_success"), "success")
+                except subprocess.CalledProcessError:
+                    print_status(get_message("ray_start_fail"), "error")
+                    return
+            else:
+                print_status(get_message("ray_running"), "success")
+
+            # Check if deepseek_chat model is available
+            print_status(get_message("checking_model"), "")
             try:
-                subprocess.run(
+                result = subprocess.run(
+                    ["easy-byzerllm", "chat", "v3_chat", "你好"],
+                    capture_output=True,
+                    text=True,
+                    timeout=30,
+                )
+                if result.returncode == 0:
+                    print_status(get_message("model_available"), "success")
+                    init_project()
+                    print_status(get_message("init_complete_final"), "success")
+                    return
+            except subprocess.TimeoutExpired:
+                print_status(get_message("model_timeout"), "error")
             except subprocess.CalledProcessError:
-                print_status(get_message("
-                print_status(get_message("
+                print_status(get_message("model_error"), "error")
+
+            # If deepseek_chat is not available
+            print_status(get_message("model_not_available"), "warning")
+            api_key = prompt(HTML(f"<b>{get_message('enter_api_key')} </b>"))
+
+            print_status(get_message("deploying_model").format("Deepseek官方"), "")
+            deploy_cmd = [
+                "byzerllm",
+                "deploy",
+                "--pretrained_model_type",
+                "saas/openai",
+                "--cpus_per_worker",
+                "0.001",
+                "--gpus_per_worker",
+                "0",
+                "--worker_concurrency",
+                "1000",
+                "--num_workers",
+                "1",
+                "--infer_params",
+                f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
+                "--model",
+                "v3_chat",
+            ]

-                capture_output=True,
-                text=True,
-                timeout=30,
-            )
-            if result.returncode == 0:
-                print_status(get_message("model_available"), "success")
-                init_project()
-                print_status(get_message("init_complete_final"), "success")
-                return
-
-            print_status(get_message("model_timeout"), "error")
-        except subprocess.CalledProcessError:
-            print_status(get_message("model_error"), "error")
-
-        # If deepseek_chat is not available
-        print_status(get_message("model_not_available"), "warning")
-        api_key = prompt(HTML(f"<b>{get_message('enter_api_key')} </b>"))
-
-        print_status(get_message("deploying_model").format("Deepseek官方"), "")
-        deploy_cmd = [
-            "byzerllm",
-            "deploy",
-            "--pretrained_model_type",
-            "saas/openai",
-            "--cpus_per_worker",
-            "0.001",
-            "--gpus_per_worker",
-            "0",
-            "--worker_concurrency",
-            "1000",
-            "--num_workers",
-            "1",
-            "--infer_params",
-            f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
-            "--model",
-            "deepseek_chat",
-        ]
-
-        try:
-            subprocess.run(deploy_cmd, check=True)
-            print_status(get_message("deploy_complete"), "success")
-        except subprocess.CalledProcessError:
-            print_status(get_message("deploy_fail"), "error")
-            return
-
-        deploy_cmd = [
-            "byzerllm",
-            "deploy",
-            "--pretrained_model_type",
-            "saas/reasoning_openai",
-            "--cpus_per_worker",
-            "0.001",
-            "--gpus_per_worker",
-            "0",
-            "--worker_concurrency",
-            "1000",
-            "--num_workers",
-            "1",
-            "--infer_params",
-            f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-reasoner",
-            "--model",
-            "deepseek_r1_chat",
-        ]
+            try:
+                subprocess.run(deploy_cmd, check=True)
+                print_status(get_message("deploy_complete"), "success")
+            except subprocess.CalledProcessError:
+                print_status(get_message("deploy_fail"), "error")
+                return
+
+            deploy_cmd = [
+                "byzerllm",
+                "deploy",
+                "--pretrained_model_type",
+                "saas/reasoning_openai",
+                "--cpus_per_worker",
+                "0.001",
+                "--gpus_per_worker",
+                "0",
+                "--worker_concurrency",
+                "1000",
+                "--num_workers",
+                "1",
+                "--infer_params",
+                f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-reasoner",
+                "--model",
+                "r1_chat",
+            ]

-            text=True,
-            timeout=30,
-            check=True,
-        )
-        print_status(get_message("validation_success"), "success")
-    except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
-        print_status(get_message("validation_fail"), "error")
-        print_status(get_message("manual_start"), "warning")
-        print_status("easy-byzerllm chat deepseek_chat 你好", "")
+            try:
+                subprocess.run(deploy_cmd, check=True)
+                print_status(get_message("deploy_complete"), "success")
+            except subprocess.CalledProcessError:
+                print_status(get_message("deploy_fail"), "error")
+                return

+            # Validate the deployment
+            print_status(get_message("validating_deploy"), "")
+            try:
+                validation_result = subprocess.run(
+                    ["easy-byzerllm", "chat", "v3_chat", "你好"],
+                    capture_output=True,
+                    text=True,
+                    timeout=30,
+                    check=True,
+                )
+                print_status(get_message("validation_success"), "success")
+            except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
+                print_status(get_message("validation_fail"), "error")
+                print_status(get_message("manual_start"), "warning")
+                print_status("easy-byzerllm chat v3_chat 你好", "")
+
+            print_status(get_message("init_complete_final"), "success")
+            configure_success[0] = True
+
+    if first_time[0] and args.product_mode == "pro" and configure_success[0]:
+        configure(f"model:v3_chat", skip_print=True)
+        configure(f"chat_model:r1_chat", skip_print=True)
+        configure(f"generate_rerank_model:r1_chat", skip_print=True)
+        configure(f"code_model:v3_chat", skip_print=True)
+        configure(f"index_filter_model:r1_chat", skip_print=True)
+
+    if first_time[0] and args.product_mode == "lite" and models_module.check_model_exists("v3_chat"):
+        configure(f"model:v3_chat", skip_print=True)
+        configure(f"chat_model:r1_chat", skip_print=True)
+        configure(f"generate_rerank_model:r1_chat", skip_print=True)
+        configure(f"code_model:v3_chat", skip_print=True)
+        configure(f"index_filter_model:r1_chat", skip_print=True)


 def convert_yaml_config_to_str(yaml_config):
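Note the shape of the first_time fix above: the old code assigned first_time = False inside init_project, which made it a fresh local rather than updating the enclosing function's flag, since a nested Python function cannot rebind an enclosing local without nonlocal. Mutating a one-element list is the equivalent workaround this release adopts. A minimal standalone demo of the idiom:

def initialize():
    first_time = [False]          # shared cell, as in the new code

    def init_project():
        first_time[0] = True      # mutates the shared list; no rebinding needed

    init_project()
    return first_time[0]

assert initialize() is True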
autocoder/common/command_templates.py
CHANGED

@@ -30,7 +30,11 @@ def init_command_template(source_dir:str):
 project_type: py

 ## The model you want to drive AutoCoder to run
-model:
+model: v3_chat
+chat_model: r1_chat
+generate_rerank_model: r1_chat
+code_model: v3_chat
+index_filter_model: r1_chat


 ## Enable the index building which can help you find the related files by your query
@@ -38,7 +42,7 @@ def init_command_template(source_dir:str):
 skip_build_index: false
 ## The model to build index for the project (Optional)
 ## 用于为项目构建索引的模型(可选)
-index_model:
+index_model: v3_chat

 ## the filter level to find the related files
 ## 0: only find the files with the file name
@@ -170,7 +174,7 @@ def base_base(source_dir:str,project_type:str)->str:
 source_dir: {{ source_dir }}
 target_file: {{ target_file }}

-model:
+model: v3_chat
 model_max_input_length: 100000
 model_max_input_length: 120000
 enable_multi_round_generate: false
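The template now seeds one model per role instead of a single blank model: key. Reading those keys back is plain YAML; a sketch with PyYAML (the file path is illustrative and depends on where init_command_template writes the rendered template):

import yaml

with open("actions/base.yml") as f:  # illustrative path
    cfg = yaml.safe_load(f)

print(cfg["model"])        # "v3_chat"
print(cfg["chat_model"])   # "r1_chat"
print(cfg["index_model"])  # "v3_chat"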
autocoder/index/entry.py
CHANGED

@@ -70,7 +70,7 @@ def build_index_and_filter_files(
     )
     phase_end = time.monotonic()
     stats["timings"]["process_tagged_sources"] = phase_end - phase_start
-
+
     if not args.skip_build_index and llm:
         # Phase 2: Build index
         if args.request_id and not args.skip_events:
autocoder/rag/raw_rag.py
CHANGED

@@ -43,7 +43,7 @@ def process_query(context: str, query: str) -> str:

 class RawRAG:
     def __init__(
-        self, llm_model="
+        self, llm_model="v3_chat", emb_model="emb", storage_name="byzerai_store"
     ):
         self.storage = ByzerStorage(
             storage_name, "rag_database", "rag_table", emb_model=emb_model
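With the new defaults, a bare construction now targets v3_chat. A construction sketch (it assumes a reachable Byzer storage backend, since __init__ immediately opens byzerai_store):

from autocoder.rag.raw_rag import RawRAG

# Defaults after this change:
# llm_model="v3_chat", emb_model="emb", storage_name="byzerai_store"
rag = RawRAG()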
autocoder/version.py
CHANGED

@@ -1 +1 @@
-__version__ = "0.1.
+__version__ = "0.1.259"