auto-coder 0.1.228-py3-none-any.whl → 0.1.230-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of auto-coder might be problematic.

{auto_coder-0.1.228.dist-info → auto_coder-0.1.230.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.228
+Version: 0.1.230
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter-client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas] >=0.1.149
+Requires-Dist: byzerllm[saas] >=0.1.150
 Requires-Dist: patch
 Requires-Dist: diff-match-patch
 Requires-Dist: GitPython
@@ -35,6 +35,7 @@ Requires-Dist: anthropic
 Requires-Dist: google-generativeai
 Requires-Dist: protobuf
 Requires-Dist: azure-cognitiveservices-speech
+Requires-Dist: real-agent
 Requires-Dist: python-docx
 Requires-Dist: docx2txt
 Requires-Dist: pdf2image
{auto_coder-0.1.228.dist-info → auto_coder-0.1.230.dist-info}/RECORD RENAMED
@@ -1,17 +1,17 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/auto_coder.py,sha256=dggZdWFCXACD-5nObadXgA-JADDdwMwNkuyBhtqU_50,59164
+autocoder/auto_coder.py,sha256=DEAtEcOHw82tbmX64VeaUpsR6eG_mM2FTm_fwyKZ7IE,58226
 autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
 autocoder/auto_coder_rag.py,sha256=illKgzP2bv-Tq50ujsofJnOHdI4pzr0ALtfR8NHHWdQ,22351
 autocoder/auto_coder_rag_client_mcp.py,sha256=WV7j5JUiQge0x4-B7Hp5-pSAFXLbvLpzQMcCovbauIM,6276
 autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
 autocoder/auto_coder_server.py,sha256=XU9b4SBH7zjPPXaTWWHV4_zJm-XYa6njuLQaplYJH_c,20290
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
-autocoder/chat_auto_coder.py,sha256=ZKj_VdoEAFPsdInf_dQeBK6lQtz8Mzz7L4nLO6l8pEY,101456
+autocoder/chat_auto_coder.py,sha256=KtDAwIiBB1b2jBSY8BCoSj88iRSwtRACkzME9h91ido,101601
 autocoder/chat_auto_coder_lang.py,sha256=YJsFi8an0Kjbo9X7xKZfpdbHS3rbhrvChZNjWqEQ5Sw,11032
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
-autocoder/models.py,sha256=DX30r_VjQK9cvYHPLYyEXuifkoJat8sRCvbOytrXwtY,5054
-autocoder/version.py,sha256=Wi8xJHc_sggKQwapQNkMrXbzUQskh-gmUlZ7t1VxJmc,24
+autocoder/models.py,sha256=FlBrF6HhGao_RiCSgYhCmP7vs0KlG4hI_BI6dyZiL9s,5292
+autocoder/version.py,sha256=fIEM3Ro4B5XghP9WBVkzWKwljXRNXRcr3OsrxvyVk0U,24
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -120,6 +120,7 @@ autocoder/utils/_markitdown.py,sha256=RU88qn4eZfYIy0GDrPxlI8oYXIypbi63VRJjdlnE0V
 autocoder/utils/coder.py,sha256=rK8e0svQBe0NOP26dIGToUXgha_hUDgxlWoC_p_r7oc,5698
 autocoder/utils/conversation_store.py,sha256=sz-hhY7sttPAUOAQU6Pze-5zJc3j0_Emj22dM_0l5ro,1161
 autocoder/utils/llm_client_interceptors.py,sha256=FEHNXoFZlCjAHQcjPRyX8FOMjo6rPXpO2AJ2zn2KTTo,901
+autocoder/utils/llms.py,sha256=YH2hJIkHUEBOz93nXzJmWUIHC9oXFlHDjE8DF3NP2q4,2252
 autocoder/utils/log_capture.py,sha256=I-bsJFLWoGUiX-GKoZsH9kWJCKSV7ZlUnRt7jh-fOL0,1548
 autocoder/utils/multi_turn.py,sha256=unK9OpqVRbK6uIcTKXgggX2wNmyj7s5eyEAQ2xUwHoM,88
 autocoder/utils/operate_config_api.py,sha256=99YAKsuUFLPwrRvj0CJal_bAPgyiXWMma6ZKMU56thw,5790
@@ -129,9 +130,12 @@ autocoder/utils/request_event_queue.py,sha256=r3lo5qGsB1dIjzVQ05dnr0z_9Z3zOkBdP1
 autocoder/utils/request_queue.py,sha256=nwp6PMtgTCiuwJI24p8OLNZjUiprC-TsefQrhMI-yPE,3889
 autocoder/utils/rest.py,sha256=opE_kBEdNQdxh350M5lUTMk5TViRfpuKP_qWc0B1lks,8861
 autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
-auto_coder-0.1.228.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-auto_coder-0.1.228.dist-info/METADATA,sha256=EUX3bowx5PPKjP_eJj8nV1PnpRQhhjpPOWjujzpnWM8,2615
-auto_coder-0.1.228.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-auto_coder-0.1.228.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
-auto_coder-0.1.228.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
-auto_coder-0.1.228.dist-info/RECORD,,
+autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=pBOyWa1qwCcsAag1XsLIeTMv_D4QN4ppGo5jFiKzIkE,4165
+autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+auto_coder-0.1.230.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.230.dist-info/METADATA,sha256=wzVDi7uDvqJhfmVUjNRUKhavnCONO-IlDuw5wn8clxw,2641
+auto_coder-0.1.230.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.230.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.230.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.230.dist-info/RECORD,,
autocoder/auto_coder.py CHANGED
@@ -42,6 +42,7 @@ from rich.live import Live
 from autocoder.auto_coder_lang import get_message
 from autocoder.common.memory_manager import save_to_memory_file
 from autocoder import models as models_module
+from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out

 console = Console()

@@ -279,6 +280,7 @@ def main(input_args: Optional[List[str]] = None):
         byzerllm.connect_cluster(address=args.ray_address)

         llm = byzerllm.ByzerLLM(verbose=args.print_request)
+
     if args.product_mode == "lite":
         llm = byzerllm.SimpleByzerLLM(default_model_name="deepseek_chat")
         api_key_dir = os.path.expanduser("~/.auto-coder/keys")
@@ -297,7 +299,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": "https://api.deepseek.com/v1",
                 "saas.api_key": api_key,
-                "saas.model": "deepseek-chat"
+                "saas.model": "deepseek-chat",
+                "saas.is_reasoning": False
             }
         )

@@ -309,7 +312,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": "https://api.deepseek.com/v1",
                 "saas.api_key": api_key,
-                "saas.model": "deepseek-chat"
+                "saas.model": "deepseek-chat",
+                "saas.is_reasoning": False
             }
         )

@@ -321,7 +325,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": "https://api.deepseek.com/v1",
                 "saas.api_key": api_key,
-                "saas.model": "deepseek-reasoner"
+                "saas.model": "deepseek-reasoner",
+                "saas.is_reasoning": True
             }
         )

@@ -333,7 +338,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": "https://api.deepseek.com/v1",
                 "saas.api_key": api_key,
-                "saas.model": "deepseek-reasoner"
+                "saas.model": "deepseek-reasoner",
+                "saas.is_reasoning": True
             }
         )

@@ -359,7 +365,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         models.append(code_model)
@@ -376,7 +383,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         llm.setup_sub_client("code_model", code_model)
@@ -396,7 +404,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         models.append(rerank_model)
@@ -413,7 +422,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         llm.setup_sub_client("generate_rerank_model", rerank_model)
@@ -429,7 +439,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         llm.setup_sub_client("inference_model", inference_model)
@@ -609,7 +620,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         llm.setup_sub_client("chat_model", chat_model)
@@ -625,7 +637,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         llm.setup_sub_client("vl_model", vl_model)
@@ -641,7 +654,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         llm.setup_sub_client("sd_model", sd_model)
@@ -657,7 +671,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         llm.setup_sub_client("text2voice_model", text2voice_model)
@@ -673,7 +688,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         llm.setup_sub_client("voice2text_model", voice2text_model)
@@ -689,7 +705,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         llm.setup_sub_client("planner_model", planner_model)
@@ -705,7 +722,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         llm.setup_sub_client("designer_model", designer_model)
@@ -721,7 +739,8 @@ def main(input_args: Optional[List[str]] = None):
             infer_params={
                 "saas.base_url": model_info["base_url"],
                 "saas.api_key": model_info["api_key"],
-                "saas.model": model_info["model_name"]
+                "saas.model": model_info["model_name"],
+                "saas.is_reasoning": model_info["is_reasoning"]
             }
         )
         llm.setup_sub_client("emb_model", emb_model)
@@ -1257,64 +1276,11 @@ def main(input_args: Optional[List[str]] = None):
                 v = chat_llm.stream_chat_oai(
                     conversations=loaded_conversations, delta_mode=True
                 )
-
-                assistant_response = ""
-                markdown_content = ""
-
-                try:
-                    with Live(
-                        Panel("", title="Response", border_style="green", expand=False),
-                        refresh_per_second=4,
-                        auto_refresh=True,
-                        vertical_overflow="visible",
-                        console=Console(force_terminal=True, color_system="auto", height=None)
-                    ) as live:
-                        for res in v:
-                            markdown_content += res[0]
-                            assistant_response += res[0]
-                            if args.request_id:
-                                request_queue.add_request(
-                                    args.request_id,
-                                    RequestValue(
-                                        value=StreamValue(value=[res[0]]),
-                                        status=RequestOption.RUNNING,
-                                    ),
-                                )
-                            live.update(
-                                Panel(
-                                    Markdown(markdown_content),
-                                    title="Response",
-                                    border_style="green",
-                                    expand=False,
-                                )
-                            )
-                        live.update(
-                            Panel(
-                                Markdown(markdown_content),
-                                title="Response",
-                                border_style="green",
-                                expand=False,
-                            )
-                        )
-                except Exception as e:
-                    ##MARK
-                    console.print(Panel(
-                        f"Error: {str(e)}",
-                        title="Error",
-                        border_style="red"
-                    ))
-                    request_queue.add_request(
-                        args.request_id,
-                        RequestValue(
-                            value=StreamValue(value=[str(e)]), status=RequestOption.FAILED
-                        ),
-                    )
-                finally:
-                    request_queue.add_request(
-                        args.request_id,
-                        RequestValue(
-                            value=StreamValue(value=[""]), status=RequestOption.COMPLETED
-                        ),
+
+                assistant_response, last_meta = stream_out(
+                    v,
+                    request_id=args.request_id,
+                    console=console
                 )

                 chat_history["ask_conversation"].append(
autocoder/chat_auto_coder.py CHANGED
@@ -51,7 +51,7 @@ from byzerllm.utils import format_str_jinja2
 from autocoder.common.memory_manager import get_global_memory_file_paths
 from autocoder import models
 import shlex
-
+from autocoder.utils.llms import get_single_llm

 class SymbolItem(BaseModel):
     symbol_name: str
@@ -1316,6 +1316,9 @@ def ask(query: str):
     if "code_model" in conf:
         yaml_config["code_model"] = conf["code_model"]

+    if "product_mode" in conf:
+        yaml_config["product_mode"] = conf["product_mode"]
+
     yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)

     execute_file = os.path.join("actions", f"{uuid.uuid4()}.yml")
@@ -1589,18 +1592,9 @@ def code_next(query: str):
     )


-def get_single_llm(model_names: str):
-    if "," in model_names:
-        # Multiple code models specified
-        model_names = model_names.split(",")
-        for _, model_name in enumerate(model_names):
-            return byzerllm.ByzerLLM.from_default_model(model_name)
-    else:
-        # Single code model
-        return byzerllm.ByzerLLM.from_default_model(model_names)
-
-
 def commit(query: str):
+    conf = memory.get("conf", {})
+    product_mode = conf.get("product_mode", "lite")
     def prepare_commit_yaml():
         auto_coder_main(["next", "chat_action"])
@@ -1652,7 +1646,7 @@ def commit(query: str):
         if os.path.exists(temp_yaml):
             os.remove(temp_yaml)

-        llm = get_single_llm(args.code_model or args.model)
+        llm = get_single_llm(args.code_model or args.model, product_mode)
         uncommitted_changes = git_utils.get_uncommitted_changes(".")
         commit_message = git_utils.generate_commit_message.with_llm(llm).run(
             uncommitted_changes
@@ -1672,6 +1666,8 @@ def commit(query: str):
         )
         git_utils.print_commit_info(commit_result=commit_result)
     except Exception as e:
+        import traceback
+        traceback.print_exc()
         print(f"Failed to commit: {e}")
         if execute_file:
             os.remove(execute_file)
@@ -1949,6 +1945,9 @@ def summon(query: str):
     if "model" in conf:
         yaml_config["model"] = conf["model"]

+    if "product_mode" in conf:
+        yaml_config["product_mode"] = conf["product_mode"]
+
     yaml_content = convert_yaml_config_to_str(yaml_config=yaml_config)

     execute_file = os.path.join("actions", f"{uuid.uuid4()}.yml")
@@ -2173,7 +2172,8 @@ def manage_models(params, query: str):
                 "model_name": data_dict.get("model_name", data_dict["name"]),
                 "base_url": data_dict.get("base_url", "https://api.openai.com/v1"),
                 "api_key_path": data_dict.get("api_key_path", "api.openai.com"),
-                "description": data_dict.get("description", "")
+                "description": data_dict.get("description", ""),
+                "is_reasoning": data_dict.get("is_reasoning", "false") in ["true", "True", "TRUE", "1"]
             }

             models_data.append(final_model)
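
Note that the new is_reasoning value is coerced from a string: only the literal strings "true", "True", "TRUE" and "1" map to True. A minimal self-contained sketch of that coercion, assuming data_dict was parsed from user-supplied key=value pairs (the parsing itself is outside this hunk):

def coerce_is_reasoning(data_dict: dict) -> bool:
    # Mirrors the expression in the hunk above; a missing key defaults to "false".
    return data_dict.get("is_reasoning", "false") in ["true", "True", "TRUE", "1"]

assert coerce_is_reasoning({"is_reasoning": "True"}) is True
assert coerce_is_reasoning({"is_reasoning": "yes"}) is False  # unrecognized spelling
assert coerce_is_reasoning({}) is False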
@@ -2650,14 +2650,14 @@ def main():
             if not query:
                 print("Please enter your query.")
             else:
-                manage_models(ARGS,query)
+                manage_models(ARGS,query)

         elif user_input.startswith("/mode"):
             conf = user_input[len("/mode"):].strip()
             if not conf:
                 print(memory["mode"])
             else:
-                memory["mode"] = conf
+                memory["mode"] = conf

         elif user_input.startswith("/conf"):
             conf = user_input[len("/conf"):].strip()
autocoder/models.py CHANGED
@@ -13,7 +13,8 @@ default_models_list = [
         "model_name": "deepseek-reasoner",
         "model_type": "saas/openai",
         "base_url": "https://api.deepseek.com/v1",
-        "api_key_path": "api.deepseek.com"
+        "api_key_path": "api.deepseek.com",
+        "is_reasoning": True
     },
     {
         "name": "deepseek_chat",
@@ -21,7 +22,8 @@ default_models_list = [
         "model_name": "deepseek-chat",
         "model_type": "saas/openai",
         "base_url": "https://api.deepseek.com/v1",
-        "api_key_path": "api.deepseek.com"
+        "api_key_path": "api.deepseek.com",
+        "is_reasoning": False
     },
     {
         "name":"o1",
@@ -29,7 +31,8 @@ default_models_list = [
         "model_name": "o1-2024-12-17",
         "model_type": "saas/openai",
         "base_url": "https://api.openai.com/v1",
-        "api_key_path": ""
+        "api_key_path": "",
+        "is_reasoning": True
     }
 ]

@@ -51,9 +54,12 @@ def load_models() -> List[Dict]:
                 custom_models = json.load(f)
                 # Custom models will override defaults with same name
                 for model in custom_models:
+                    model["is_reasoning"] = model.get("is_reasoning", False)
                     models_dict[model["name"]] = model
+
             except json.JSONDecodeError:
                 # If JSON is invalid, just use defaults
+                print("JSON is invalid, using defaults")
                 save_models(default_models_list)
     else:
         # If file doesn't exist, create it with defaults
@@ -123,7 +129,7 @@ def update_model_with_api_key(name: str, api_key: str) -> Dict:
     # Look up the model among existing models
     found_model = None
     for model in models:
-        if model["name"] == name:
+        if model["name"] == name.strip():
             found_model = model
             break

@@ -140,7 +146,7 @@ def update_model_with_api_key(name: str, api_key: str) -> Dict:
     os.makedirs(api_key_dir, exist_ok=True)
     api_key_file = os.path.join(api_key_dir, api_key_path)
     with open(api_key_file, "w") as f:
-        f.write(api_key)
+        f.write(api_key.strip())

     # If it's a new model, add it to the model list
     if all(model["name"] != name for model in models):
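
Taken together, the load_models change means custom entries missing the new flag are normalized to is_reasoning=False. A minimal sketch of that normalization with a made-up entry; the location of the custom models file is not shown in this diff, and the field names follow default_models_list above:

custom_models = [
    {
        # Hypothetical user-defined entry.
        "name": "my_deepseek",
        "model_type": "saas/openai",
        "model_name": "deepseek-chat",
        "base_url": "https://api.deepseek.com/v1",
        "api_key_path": "api.deepseek.com",
        # "is_reasoning" deliberately omitted
    }
]

models_dict = {}
for model in custom_models:
    # Same defaulting as the hunk above: an absent flag becomes False.
    model["is_reasoning"] = model.get("is_reasoning", False)
    models_dict[model["name"]] = model

assert models_dict["my_deepseek"]["is_reasoning"] is False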
autocoder/utils/auto_coder_utils/chat_stream_out.py ADDED
@@ -0,0 +1,120 @@
+from rich.console import Console
+from rich.live import Live
+from rich.panel import Panel
+from rich.markdown import Markdown
+from typing import Generator, List, Dict, Any, Optional, Tuple
+from autocoder.utils.request_queue import RequestValue, RequestOption, StreamValue
+from autocoder.utils.request_queue import request_queue
+
+MAX_HISTORY_LINES = 40  # Maximum number of history lines to keep
+
+def stream_out(
+    stream_generator: Generator[Tuple[str, Dict[str, Any]], None, None],
+    request_id: Optional[str] = None,
+    console: Optional[Console] = None
+) -> Tuple[str, Optional[Dict[str, Any]]]:
+    """
+    Consume streamed output events and render them in the terminal.
+
+    Args:
+        stream_generator: generator producing the streamed output
+        request_id: request ID, used to update the request queue
+        console: Rich Console object
+
+    Returns:
+        Tuple[str, Dict[str, Any]]: the full response content and the last metadata
+    """
+    if console is None:
+        console = Console(force_terminal=True, color_system="auto", height=None)
+
+    lines_buffer = []  # completed history lines
+    current_line = ""  # line currently being streamed
+    assistant_response = ""
+    last_meta = None
+
+    try:
+        with Live(
+            Panel("", title="Response", border_style="green"),
+            refresh_per_second=4,
+            console=console
+        ) as live:
+            for res in stream_generator:
+                last_meta = res[1]
+                content = res[0]
+                assistant_response += content
+
+                # Process all accumulated lines
+                parts = (current_line + content).split("\n")
+
+                # The last part is the still-unfinished line
+                if len(parts) > 1:
+                    # Move the completed lines into the buffer
+                    lines_buffer.extend(parts[:-1])
+                    # Enforce the history line limit
+                    if len(lines_buffer) > MAX_HISTORY_LINES:
+                        del lines_buffer[0:len(lines_buffer) - MAX_HISTORY_LINES]
+
+                # Update the current line
+                current_line = parts[-1]
+
+                # Display content = history lines + current line
+                display_content = "\n".join(lines_buffer[-MAX_HISTORY_LINES:] + [current_line])
+
+                if request_id and request_queue:
+                    request_queue.add_request(
+                        request_id,
+                        RequestValue(
+                            value=StreamValue(value=[content]),
+                            status=RequestOption.RUNNING,
+                        ),
+                    )
+
+                live.update(
+                    Panel(
+                        Markdown(display_content),
+                        title="Response",
+                        border_style="green",
+                        height=min(50, live.console.height - 4)
+                    )
+                )
+
+            # Handle whatever is left on the last line
+            if current_line:
+                lines_buffer.append(current_line)
+
+            # Final render of the result
+            live.update(
+                Panel(
+                    Markdown(assistant_response),
+                    title="Final Response",
+                    border_style="blue"
+                )
+            )
+
+    except Exception as e:
+        console.print(Panel(
+            f"Error: {str(e)}",
+            title="Error",
+            border_style="red"
+        ))
+
+        if request_id and request_queue:
+            request_queue.add_request(
+                request_id,
+                RequestValue(
+                    value=StreamValue(value=[str(e)]),
+                    status=RequestOption.FAILED
+                ),
+            )
+
+    finally:
+        if request_id and request_queue:
+            request_queue.add_request(
+                request_id,
+                RequestValue(
+                    value=StreamValue(value=[""]),
+                    status=RequestOption.COMPLETED
+                ),
+            )
+
+    return assistant_response, last_meta
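
Because stream_out only consumes an iterable of (content, metadata) tuples, it can be exercised without a live model. A hedged usage sketch; the fake generator below is illustrative and stands in for chat_llm.stream_chat_oai(..., delta_mode=True):

from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out

def fake_stream():
    # Each item is (content delta, metadata), matching the generator's contract.
    for chunk in ["# Title\n", "some ", "streamed ", "markdown"]:
        yield chunk, {"model": "demo"}

# Without request_id, all request_queue updates are skipped.
response, last_meta = stream_out(fake_stream())
print(len(response), last_meta)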
autocoder/utils/llms.py ADDED
@@ -0,0 +1,52 @@
+import byzerllm
+from autocoder.auto_coder import models_module
+
+def get_single_llm(model_names: str, product_mode: str):
+    if product_mode == "pro":
+        if "," in model_names:
+            # Multiple code models specified
+            model_names = model_names.split(",")
+            for _, model_name in enumerate(model_names):
+                return byzerllm.ByzerLLM.from_default_model(model_name)
+        else:
+            # Single code model
+            return byzerllm.ByzerLLM.from_default_model(model_names)
+
+    if product_mode == "lite":
+        if "," in model_names:
+            # Multiple code models specified
+            model_names = model_names.split(",")
+            for _, model_name in enumerate(model_names):
+                model_name = model_name.strip()
+                model_info = models_module.get_model_by_name(model_name)
+                target_llm = byzerllm.SimpleByzerLLM(default_model_name=model_name)
+                target_llm.deploy(
+                    model_path="",
+                    pretrained_model_type=model_info["model_type"],
+                    udf_name=model_name,
+                    infer_params={
+                        "saas.base_url": model_info["base_url"],
+                        "saas.api_key": model_info["api_key"],
+                        "saas.model": model_info["model_name"],
+                        "saas.is_reasoning": model_info["is_reasoning"]
+                    }
+                )
+                return target_llm
+
+        else:
+            # Single code model
+            model_info = models_module.get_model_by_name(model_names)
+            model_name = model_names
+            target_llm = byzerllm.SimpleByzerLLM(default_model_name=model_name)
+            target_llm.deploy(
+                model_path="",
+                pretrained_model_type=model_info["model_type"],
+                udf_name=model_name,
+                infer_params={
+                    "saas.base_url": model_info["base_url"],
+                    "saas.api_key": model_info["api_key"],
+                    "saas.model": model_info["model_name"],
+                    "saas.is_reasoning": model_info["is_reasoning"]
+                }
+            )
+            return target_llm
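
This helper replaces the get_single_llm that was deleted from chat_auto_coder.py above, adding the product_mode switch. A minimal call sketch, assuming default model names from autocoder/models.py and API keys already stored under ~/.auto-coder/keys:

from autocoder.utils.llms import get_single_llm

# Lite mode deploys a SimpleByzerLLM from the local model registry.
llm = get_single_llm("deepseek_chat", product_mode="lite")

# With a comma-separated list, only the first entry is deployed and returned
# (note the early return inside the for loop above).
llm = get_single_llm("deepseek_chat,o1", product_mode="lite")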
autocoder/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.1.228"
+__version__ = "0.1.230"