auto-coder 0.1.226__py3-none-any.whl → 0.1.227__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {auto_coder-0.1.226.dist-info → auto_coder-0.1.227.dist-info}/METADATA +2 -2
- {auto_coder-0.1.226.dist-info → auto_coder-0.1.227.dist-info}/RECORD +21 -19
- autocoder/auto_coder.py +424 -131
- autocoder/chat_auto_coder.py +267 -143
- autocoder/chat_auto_coder_lang.py +30 -1
- autocoder/common/__init__.py +2 -1
- autocoder/common/code_auto_generate.py +23 -9
- autocoder/common/code_auto_generate_diff.py +23 -9
- autocoder/common/code_auto_generate_editblock.py +23 -9
- autocoder/common/code_auto_generate_strict_diff.py +23 -9
- autocoder/common/command_completer.py +6 -0
- autocoder/common/types.py +1 -0
- autocoder/common/utils_code_auto_generate.py +38 -0
- autocoder/dispacher/actions/action.py +4 -4
- autocoder/dispacher/actions/plugins/action_regex_project.py +6 -2
- autocoder/models.py +158 -0
- autocoder/version.py +1 -1
- {auto_coder-0.1.226.dist-info → auto_coder-0.1.227.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.226.dist-info → auto_coder-0.1.227.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.226.dist-info → auto_coder-0.1.227.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.226.dist-info → auto_coder-0.1.227.dist-info}/top_level.txt +0 -0
autocoder/chat_auto_coder.py
CHANGED
@@ -49,6 +49,8 @@ from autocoder.common.mcp_server import get_mcp_server, McpRequest, McpInstallRe
 import byzerllm
 from byzerllm.utils import format_str_jinja2
 from autocoder.common.memory_manager import get_global_memory_file_paths
+from autocoder import models
+import shlex


 class SymbolItem(BaseModel):
@@ -68,6 +70,17 @@ def parse_arguments():
         action="store_true",
         help="Enter the auto-coder.chat without initializing the system",
     )
+
+    parser.add_argument(
+        "--product_mode",
+        type=str,
+        default="pro",
+        help="The mode of the auto-coder.chat, lite/pro default is pro",
+    )
+
+    parser.add_argument("--lite", action="store_true", help="Lite mode")
+    parser.add_argument("--pro", action="store_true", help="Pro mode")
+
     return parser.parse_args()

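This hunk only registers the new options; the precedence between them is applied in the main() hunk further down, where --lite and --pro override --product_mode. A minimal standalone sketch of that precedence (the parser here is a stripped-down stand-in, not the real parse_arguments()):

```python
# Stripped-down reproduction of the new flags and how main() resolves them:
# the boolean shortcuts win over the string option's default ("pro").
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--product_mode", type=str, default="pro")
parser.add_argument("--lite", action="store_true", help="Lite mode")
parser.add_argument("--pro", action="store_true", help="Pro mode")

args = parser.parse_args(["--lite"])
if args.lite:                  # mirrors the checks added to main() below
    args.product_mode = "lite"
if args.pro:
    args.product_mode = "pro"

print(args.product_mode)  # -> "lite"
```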
@@ -115,6 +128,7 @@ commands = [
     "/lib",
     "/design",
     "/mcp",
+    "/models",
 ]

@@ -136,9 +150,6 @@ def show_help():
     print(
         f"  \033[94m/coding\033[0m \033[93m<query>\033[0m - \033[92m{get_message('coding_desc')}\033[0m"
     )
-    print(
-        f"  \033[94m/design\033[0m \033[93m<query>\033[0m - \033[92m{get_message('design_desc')}\033[0m"
-    )
     print(
         f"  \033[94m/ask\033[0m \033[93m<query>\033[0m - \033[92m{get_message('ask_desc')}\033[0m"
     )
@@ -175,6 +186,7 @@ def show_help():
     print(
         f"  \033[94m/mode\033[0m - \033[92m{get_message('mode_desc')}\033[0m")
     print(f"  \033[94m/lib\033[0m - \033[92m{get_message('lib_desc')}\033[0m")
+    print(f"  \033[94m/models\033[0m - \033[92m{get_message('models_desc')}\033[0m")
     print(
         f"  \033[94m/exit\033[0m - \033[92m{get_message('exit_desc')}\033[0m")
     print()
@@ -234,7 +246,7 @@ def configure_project_type():
     return project_type


-def initialize_system():
+def initialize_system(args):
     print(f"\n\033[1;34m{get_message('initializing')}\033[0m")

     def print_status(message, status):
@@ -279,88 +291,108 @@ def initialize_system():
     print_status(get_message("init_complete"), "success")

     init_project()
-
-
-
-
-
-
+
+    if args.product_mode == "lite":
+        # Setup deepseek api key
+        api_key_dir = os.path.expanduser("~/.auto-coder/keys")
+        api_key_file = os.path.join(api_key_dir, "api.deepseek.com")
+
+        if not os.path.exists(api_key_file):
+            print_status(get_message("model_not_available"), "warning")
+            api_key = prompt(HTML(f"<b>{get_message('enter_api_key')} </b>"))
+
+            # Create directory if it doesn't exist
+            os.makedirs(api_key_dir, exist_ok=True)
+
+            # Save the API key
+            with open(api_key_file, "w") as f:
+                f.write(api_key)
+
+            print_status(f"API key saved successfully: {api_key_file}", "success")
+
+    if args.product_mode == "pro":
+        # Check if Ray is running
+        print_status(get_message("checking_ray"), "")
+        ray_status = subprocess.run(
+            ["ray", "status"], capture_output=True, text=True)
+        if ray_status.returncode != 0:
+            print_status(get_message("ray_not_running"), "warning")
+            try:
+                subprocess.run(["ray", "start", "--head"], check=True)
+                print_status(get_message("ray_start_success"), "success")
+            except subprocess.CalledProcessError:
+                print_status(get_message("ray_start_fail"), "error")
+                return
+        else:
+            print_status(get_message("ray_running"), "success")
+
+        # Check if deepseek_chat model is available
+        print_status(get_message("checking_model"), "")
         try:
-            subprocess.run(
-
+            result = subprocess.run(
+                ["easy-byzerllm", "chat", "deepseek_chat", "你好"],
+                capture_output=True,
+                text=True,
+                timeout=30,
+            )
+            if result.returncode == 0:
+                print_status(get_message("model_available"), "success")
+                init_project()
+                print_status(get_message("init_complete_final"), "success")
+                return
+        except subprocess.TimeoutExpired:
+            print_status(get_message("model_timeout"), "error")
         except subprocess.CalledProcessError:
-            print_status(get_message("
-
-
-            print_status(get_message("
+            print_status(get_message("model_error"), "error")
+
+        # If deepseek_chat is not available
+        print_status(get_message("model_not_available"), "warning")
+        api_key = prompt(HTML(f"<b>{get_message('enter_api_key')} </b>"))
+
+        print_status(get_message("deploying_model").format("Deepseek官方"), "")
+        deploy_cmd = [
+            "byzerllm",
+            "deploy",
+            "--pretrained_model_type",
+            "saas/openai",
+            "--cpus_per_worker",
+            "0.001",
+            "--gpus_per_worker",
+            "0",
+            "--worker_concurrency",
+            "1000",
+            "--num_workers",
+            "1",
+            "--infer_params",
+            f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
+            "--model",
+            "deepseek_chat",
+        ]

-
-
-
-
-
-                capture_output=True,
-                text=True,
-                timeout=30,
-            )
-            if result.returncode == 0:
-                print_status(get_message("model_available"), "success")
-                init_project()
-                print_status(get_message("init_complete_final"), "success")
+        try:
+            subprocess.run(deploy_cmd, check=True)
+            print_status(get_message("deploy_complete"), "success")
+        except subprocess.CalledProcessError:
+            print_status(get_message("deploy_fail"), "error")
             return
-        except subprocess.TimeoutExpired:
-            print_status(get_message("model_timeout"), "error")
-        except subprocess.CalledProcessError:
-            print_status(get_message("model_error"), "error")
-
-        # If deepseek_chat is not available
-        print_status(get_message("model_not_available"), "warning")
-        api_key = prompt(HTML(f"<b>{get_message('enter_api_key')} </b>"))
-
-        print_status(get_message("deploying_model").format("Deepseek官方"), "")
-        deploy_cmd = [
-            "byzerllm",
-            "deploy",
-            "--pretrained_model_type",
-            "saas/openai",
-            "--cpus_per_worker",
-            "0.001",
-            "--gpus_per_worker",
-            "0",
-            "--worker_concurrency",
-            "1000",
-            "--num_workers",
-            "1",
-            "--infer_params",
-            f"saas.base_url=https://api.deepseek.com/v1 saas.api_key={api_key} saas.model=deepseek-chat",
-            "--model",
-            "deepseek_chat",
-        ]
-
-        try:
-            subprocess.run(deploy_cmd, check=True)
-            print_status(get_message("deploy_complete"), "success")
-        except subprocess.CalledProcessError:
-            print_status(get_message("deploy_fail"), "error")
-            return

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Validate the deployment
+        print_status(get_message("validating_deploy"), "")
+        try:
+            validation_result = subprocess.run(
+                ["easy-byzerllm", "chat", "deepseek_chat", "你好"],
+                capture_output=True,
+                text=True,
+                timeout=30,
+                check=True,
+            )
+            print_status(get_message("validation_success"), "success")
+        except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
+            print_status(get_message("validation_fail"), "error")
+            print_status(get_message("manual_start"), "warning")
+            print_status("easy-byzerllm chat deepseek_chat 你好", "")

-
+    print_status(get_message("init_complete_final"), "success")


 def convert_yaml_config_to_str(yaml_config):
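In lite mode the key is only ever written here; the read side is not shown in this diff (the /models /list handler below merely checks that the file exists). A hypothetical read-back helper, for illustration only (read_api_key is not a function from this package; the ~/.auto-coder/keys/<host> layout is taken from the hunk above):

```python
# Hypothetical helper for reading back a key saved by the lite-mode setup.
import os
from typing import Optional

def read_api_key(api_key_path: str = "api.deepseek.com") -> Optional[str]:
    key_file = os.path.expanduser(os.path.join("~/.auto-coder/keys", api_key_path))
    if os.path.exists(key_file):
        with open(key_file) as f:
            return f.read().strip()
    return None  # caller decides whether to prompt the user for a key
```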
@@ -504,66 +536,6 @@ def configure(conf: str, skip_print=False):
     if not skip_print:
         print(f"\033[92mSet {key} to {value}\033[0m")

-
-def show_help():
-    print(f"\033[1m{get_message('supported_commands')}\033[0m")
-    print()
-    print(
-        f"  \033[94m{get_message('commands')}\033[0m - \033[93m{get_message('description')}\033[0m"
-    )
-    print(
-        f"  \033[94m/add_files\033[0m \033[93m<file1> <file2> ...\033[0m - \033[92m{get_message('add_files_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/remove_files\033[0m \033[93m<file1>,<file2> ...\033[0m - \033[92m{get_message('remove_files_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/chat\033[0m \033[93m<query>\033[0m - \033[92m{get_message('chat_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/coding\033[0m \033[93m<query>\033[0m - \033[92m{get_message('coding_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/ask\033[0m \033[93m<query>\033[0m - \033[92m{get_message('ask_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/summon\033[0m \033[93m<query>\033[0m - \033[92m{get_message('summon_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/revert\033[0m - \033[92m{get_message('revert_desc')}\033[0m")
-    print(
-        f"  \033[94m/commit\033[0m - \033[92m{get_message('commit_desc')}\033[0m")
-    print(
-        f"  \033[94m/conf\033[0m \033[93m<key>:<value>\033[0m - \033[92m{get_message('conf_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/index/query\033[0m \033[93m<args>\033[0m - \033[92m{get_message('index_query_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/index/build\033[0m - \033[92m{get_message('index_build_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/list_files\033[0m - \033[92m{get_message('list_files_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/help\033[0m - \033[92m{get_message('help_desc')}\033[0m")
-    print(
-        f"  \033[94m/exclude_dirs\033[0m \033[93m<dir1>,<dir2> ...\033[0m - \033[92m{get_message('exclude_dirs_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/shell\033[0m \033[93m<command>\033[0m - \033[92m{get_message('shell_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/voice_input\033[0m - \033[92m{get_message('voice_input_desc')}\033[0m"
-    )
-    print(
-        f"  \033[94m/mode\033[0m - \033[92m{get_message('mode_desc')}\033[0m")
-    print(f"  \033[94m/lib\033[0m - \033[92m{get_message('lib_desc')}\033[0m")
-    print(
-        f"  \033[94m/exit\033[0m - \033[92m{get_message('exit_desc')}\033[0m")
-    print()
-
-
 # word_completer = WordCompleter(commands)

@@ -904,6 +876,14 @@ class CommandCompleter(Completer):
                 for command in parser.get_sub_commands():
                     if command.startswith(current_word):
                         yield Completion(command, start_position=-len(current_word))
+            elif words[0] == "/models":
+                new_text = text[len("/models"):]
+                parser = CommandTextParser(new_text, words[0])
+                parser.lib()
+                current_word = parser.current_word()
+                for command in parser.get_sub_commands():
+                    if command.startswith(current_word):
+                        yield Completion(command, start_position=-len(current_word))

             elif words[0] == "/coding":
                 new_text = text[len("/coding"):]
@@ -2088,6 +2068,133 @@ def generate_shell_command(input_text):
     finally:
         os.remove(execute_file)

+def manage_models(params, query: str):
+    """
+    Handle /models subcommands:
+      /models /list - List all models (default + custom)
+      /models /add <name> <api_key> - Add model with simplified params
+      /models /add_model name=xxx base_url=xxx ... - Add model with custom params
+      /models /remove <name> - Remove model by name
+    """
+    print("manage_models", params, query)
+    console = Console()
+
+    if params.product_mode != "lite":
+        console.print(f"[red]{get_message('models_lite_only')}[/red]")
+        return
+
+    models_data = models.load_models()
+    subcmd = ""
+    if "/list" in query:
+        subcmd = "/list"
+        query = query.replace("/list", "", 1).strip()
+
+    if "/add_model" in query:
+        subcmd = "/add_model"
+        query = query.replace("/add_model", "", 1).strip()
+
+    if "/add" in query:
+        subcmd = "/add"
+        query = query.replace("/add", "", 1).strip()
+
+    if "/remove" in query:
+        subcmd = "/remove"
+        query = query.replace("/remove", "", 1).strip()
+    if not subcmd:
+        console.print(get_message("models_usage"))
+        return
+
+    if subcmd == "/list":
+        if models_data:
+            table = Table(title=get_message("models_title"))
+            table.add_column("Name", style="cyan")
+            table.add_column("Model Type", style="green")
+            table.add_column("Model Name", style="magenta")
+            table.add_column("Description", style="white")
+            for m in models_data:
+                # Check if api_key_path exists and file exists
+                api_key_path = m.get("api_key_path", "")
+                name = m.get("name", "")
+                if api_key_path:
+                    api_key_file = os.path.expanduser(f"~/.auto-coder/keys/{api_key_path}")
+                    if os.path.exists(api_key_file):
+                        name = f"{name}*"
+
+                table.add_row(
+                    name,
+                    m.get("model_type", ""),
+                    m.get("model_name", ""),
+                    m.get("description", "")
+                )
+            console.print(table)
+        else:
+            console.print(f"[yellow]{get_message('models_no_models')}[/yellow]")
+
+    elif subcmd == "/add":
+        # Support both simplified and legacy formats
+        args = query.strip().split(" ")
+        if len(args) == 2:
+            # Simplified: /models /add <name> <api_key>
+            name, api_key = args[0], args[1]
+            result = models.update_model_with_api_key(name, api_key)
+            if result:
+                console.print(f"[green]{get_message('models_added').format(name=name)}[/green]")
+            else:
+                console.print(f"[red]{get_message('models_add_failed').format(name=name)}[/red]")
+        else:
+            console.print(f"[red]{get_message('models_add_usage')}[/red]")
+
+    elif subcmd == "/add_model":
+        # Parse key=value pairs: /models /add_model name=abc base_url=http://xx ...
+        # Collect key=value pairs
+        kv_pairs = shlex.split(query)
+        data_dict = {}
+        for pair in kv_pairs:
+            if '=' not in pair:
+                console.print(f"[red]Invalid parameter: {pair}, should be key=value[/red]")
+                continue
+            k, v = pair.split('=', 1)
+            data_dict[k.strip()] = v.strip()
+
+        # Name is required
+        if "name" not in data_dict:
+            console.print(f"[red]{get_message('models_add_model_name_required')}[/red]")
+            return
+
+        # Check duplication
+        if any(m["name"] == data_dict["name"] for m in models_data):
+            console.print(f"[yellow]{get_message('models_add_model_exists').format(name=data_dict['name'])}[/yellow]")
+            return
+
+        # Create model with defaults
+        final_model = {
+            "name": data_dict["name"],
+            "model_type": data_dict.get("model_type", "saas/openai"),
+            "model_name": data_dict.get("model_name", data_dict["name"]),
+            "base_url": data_dict.get("base_url", "https://api.openai.com/v1"),
+            "api_key_path": data_dict.get("api_key_path", "api.openai.com"),
+            "description": data_dict.get("description", "")
+        }
+
+        models_data.append(final_model)
+        models.save_models(models_data)
+        console.print(f"[green]{get_message('models_add_model_success').format(name=data_dict['name'])}[/green]")
+
+    elif subcmd == "/remove":
+        args = query.strip().split(" ")
+        if len(args) < 1:
+            console.print(f"[red]{get_message('models_add_usage')}[/red]")
+            return
+        name = args[0]
+        filtered_models = [m for m in models_data if m["name"] != name]
+        if len(filtered_models) == len(models_data):
+            console.print(f"[yellow]{get_message('models_add_model_remove').format(name=name)}[/yellow]")
+            return
+        models.save_models(filtered_models)
+        console.print(f"[green]{get_message('models_add_model_removed').format(name=name)}[/green]")
+
+    else:
+        console.print(f"[yellow]{get_message('models_unknown_subcmd').format(subcmd=subcmd)}[/yellow]")

 def exclude_dirs(dir_names: List[str]):
     new_dirs = dir_names
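For reference, the shape of a record created by /models /add_model follows directly from the final_model defaults above; these entries are persisted via models.save_models() (autocoder/models.py is new in this release, +158 lines, not shown in this diff):

```python
# Record produced by e.g.: /models /add_model name=my-model
# Field names and defaults are copied from the final_model dict above;
# "my-model" is a placeholder, not a model shipped with the package.
entry = {
    "name": "my-model",                       # required key
    "model_type": "saas/openai",              # default when omitted
    "model_name": "my-model",                 # defaults to the value of "name"
    "base_url": "https://api.openai.com/v1",  # default when omitted
    "api_key_path": "api.openai.com",         # looked up under ~/.auto-coder/keys/
    "description": "",
}
```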
@@ -2372,11 +2479,19 @@ def lib_command(args: List[str]):

 def main():
     ARGS = parse_arguments()
+
+    if ARGS.lite:
+        ARGS.product_mode = "lite"
+
+    if ARGS.pro:
+        ARGS.product_mode = "pro"

     if not ARGS.quick:
-        initialize_system()
-
+        initialize_system(ARGS)
+
     load_memory()
+
+    configure(f"product_mode:{ARGS.product_mode}")

     MODES = {
         "normal": "normal",
@@ -2434,6 +2549,8 @@ def main():
             memory["mode"] = "normal"
         mode = memory["mode"]
         human_as_model = memory["conf"].get("human_as_model", "false")
+        if mode not in MODES:
+            mode = "normal"
         return f"  Mode: {MODES[mode]} (ctl+k) | Human as Model: {human_as_model} (ctl+n or /conf human_as_model:true/false)"

     session = PromptSession(
@@ -2528,6 +2645,13 @@ def main():
             elif user_input.startswith("/list_files"):
                 list_files()

+            elif user_input.startswith("/models"):
+                query = user_input[len("/models"):].strip()
+                if not query:
+                    print("Please enter your query.")
+                else:
+                    manage_models(ARGS, query)
+
             elif user_input.startswith("/mode"):
                 conf = user_input[len("/mode"):].strip()
                 if not conf:
@@ -2606,7 +2730,7 @@ def main():
                     result = eval(code)
                     print(f"Debug result: {result}")
                 except Exception as e:
-                    print(f"Debug error: {str(e)}")
+                    print(f"Debug error: {str(e)}")

             # elif user_input.startswith("/shell"):
             else:
autocoder/chat_auto_coder_lang.py
CHANGED

@@ -66,6 +66,21 @@ MESSAGES = {
         "exit_desc": "Exit the program",
         "design_desc": "Generate SVG image based on the provided description",
         "commit_desc": "Auto generate yaml file and commit changes based on user's manual changes",
+        "models_desc": "Manage model configurations, only available in lite mode",
+        "models_usage": "Usage: /models /list|/add|/add_model|/remove ...",
+        "models_added": "Added/Updated model '{name}' successfully.",
+        "models_add_failed": "Failed to add model '{name}'. Model not found in defaults.",
+        "models_add_usage": "Usage: /models /add <name> <api_key> or\n/models /add <name> <model_type> <model_name> <base_url> <api_key_path> [description]",
+        "models_add_model_params": "Please provide parameters in key=value format",
+        "models_add_model_name_required": "'name' parameter is required",
+        "models_add_model_exists": "Model '{name}' already exists.",
+        "models_add_model_success": "Successfully added custom model: {name}",
+        "models_add_model_remove": "Model '{name}' not found.",
+        "models_add_model_removed": "Removed model: {name}",
+        "models_unknown_subcmd": "Unknown subcommand: {subcmd}",
+        "models_title": "All Models (内置 + models.json)",
+        "models_no_models": "No models found.",
+        "models_lite_only": "The /models command is only available in lite mode"
     },
     "zh": {
         "initializing": "🚀 正在初始化系统...",
@@ -132,7 +147,21 @@ MESSAGES = {
         "exit_desc": "退出程序",
         "design_desc": "根据需求设计SVG图片",
         "commit_desc": "根据用户人工修改的代码自动生成yaml文件并提交更改",
-
+        "models_desc": "管理模型配置,仅在lite模式下可用",
+        "models_usage": "用法: /models /list|/add|/add_model|/remove ...",
+        "models_added": "成功添加/更新模型 '{name}'。",
+        "models_add_failed": "添加模型 '{name}' 失败。在默认模型中未找到该模型。",
+        "models_add_usage": "用法: /models /add <name> <api_key> 或\n/models /add <name> <model_type> <model_name> <base_url> <api_key_path> [description]",
+        "models_add_model_params": "请提供 key=value 格式的参数",
+        "models_add_model_name_required": "缺少必需的 'name' 参数",
+        "models_add_model_exists": "模型 '{name}' 已存在。",
+        "models_add_model_success": "成功添加自定义模型: {name}",
+        "models_add_model_remove": "找不到模型 '{name}'。",
+        "models_add_model_removed": "已移除模型: {name}",
+        "models_unknown_subcmd": "未知的子命令: {subcmd}",
+        "models_title": "所有模型 (内置 + models.json)",
+        "models_no_models": "未找到任何模型。",
+        "models_lite_only": "/models 命令仅在 lite 模式下可用"
     }
 }

autocoder/common/__init__.py
CHANGED
@@ -351,7 +351,8 @@ class AutoCoderArgs(pydantic.BaseModel):
     generate_times_same_model: Optional[int] = 1

     action: List[str] = []
-    enable_global_memory: Optional[bool] = True
+    enable_global_memory: Optional[bool] = True
+    product_mode: Optional[str] = "lite"

     class Config:
         protected_namespaces = ()
autocoder/common/code_auto_generate.py
CHANGED

@@ -6,6 +6,7 @@ from autocoder.utils.queue_communicate import queue_communicate, CommunicateEven
 from autocoder.common import sys_prompt
 from concurrent.futures import ThreadPoolExecutor
 from autocoder.common.types import CodeGenerateResult
+from autocoder.common.utils_code_auto_generate import chat_with_continue


 class CodeAutoGenerate:
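The new helper module autocoder/common/utils_code_auto_generate.py (+38 lines) is not included in this diff. From the call sites in the next hunk, chat_with_continue(llm=..., conversations=..., llm_config=...) returns an object exposing .content plus two token counters. A hypothetical sketch of that result shape, inferred only from those call sites:

```python
# Hypothetical result type; the real definition lives in
# autocoder/common/utils_code_auto_generate.py (not shown in this diff).
from dataclasses import dataclass

@dataclass
class ChatResult:
    content: str                 # assistant text appended to conversations
    input_tokens_count: int      # summed into statistics["input_tokens_count"]
    generated_tokens_count: int  # summed into statistics["generated_tokens_count"]
```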
@@ -187,34 +188,47 @@ class CodeAutoGenerate:

         conversations_list = []
         results = []
+        input_tokens_count = 0
+        generated_tokens_count = 0
         if not self.args.human_as_model:
             with ThreadPoolExecutor(max_workers=len(self.llms) * self.generate_times_same_model) as executor:
                 futures = []
                 for llm in self.llms:
                     for _ in range(self.generate_times_same_model):
                         futures.append(executor.submit(
-                            llm
-
+                            chat_with_continue, llm=llm, conversations=conversations, llm_config=llm_config))
+                temp_results = [future.result() for future in futures]
+                for result in temp_results:
+                    results.append(result.content)
+                    input_tokens_count += result.input_tokens_count
+                    generated_tokens_count += result.generated_tokens_count
+
             for result in results:
                 conversations_list.append(
                     conversations + [{"role": "assistant", "content": result}])
         else:
             for _ in range(self.args.human_model_num):
-
-
-
-
-
+                single_result = chat_with_continue(llm=self.llms[0], conversations=conversations, llm_config=llm_config)
+                results.append(single_result.content)
+                input_tokens_count += single_result.input_tokens_count
+                generated_tokens_count += single_result.generated_tokens_count
+                conversations_list.append(conversations + [{"role": "assistant", "content": single_result.content}])
+
+        statistics = {
+            "input_tokens_count": input_tokens_count,
+            "generated_tokens_count": generated_tokens_count
+        }
+
         if self.args.request_id and not self.args.skip_events:
             queue_communicate.send_event_no_wait(
                 request_id=self.args.request_id,
                 event=CommunicateEvent(
                     event_type=CommunicateEventType.CODE_GENERATE_END.value,
-                    data=
+                    data=json.dumps(statistics, ensure_ascii=False),
                 ),
             )

-        return CodeGenerateResult(contents=results, conversations=conversations_list)
+        return CodeGenerateResult(contents=results, conversations=conversations_list, metadata=statistics)

     def multi_round_run(
         self, query: str, source_content: str, max_steps: int = 10
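The same chat_with_continue/statistics change is applied to the diff, editblock, and strict_diff generators (each +23 -9 in the file list above). A sketch of what callers can now read off the result, assuming CodeGenerateResult gains an optional metadata field as the one-line autocoder/common/types.py change suggests (illustrative only; the real class is defined there, not here):

```python
# Illustrative stand-in mirroring the fields this diff attaches to
# CodeGenerateResult; pydantic is assumed because AutoCoderArgs uses it.
from typing import Any, Dict, List
from pydantic import BaseModel

class CodeGenerateResult(BaseModel):
    contents: List[str]
    conversations: List[Any]
    metadata: Dict[str, Any] = {}

result = CodeGenerateResult(
    contents=["def hello(): ..."],
    conversations=[[{"role": "assistant", "content": "def hello(): ..."}]],
    metadata={"input_tokens_count": 1200, "generated_tokens_count": 180},
)
print(result.metadata["generated_tokens_count"])  # -> 180
```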