auto-coder 0.1.228__py3-none-any.whl → 0.1.229__py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Potentially problematic release.
This version of auto-coder might be problematic.
- {auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/METADATA +3 -2
- {auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/RECORD +12 -10
- autocoder/auto_coder.py +6 -58
- autocoder/chat_auto_coder.py +13 -3
- autocoder/models.py +8 -5
- autocoder/utils/auto_coder_utils/__init__.py +0 -0
- autocoder/utils/auto_coder_utils/chat_stream_out.py +120 -0
- autocoder/version.py +1 -1
- {auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/top_level.txt +0 -0
{auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: auto-coder
-Version: 0.1.228
+Version: 0.1.229
 Summary: AutoCoder: AutoCoder
 Author: allwefantasy
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence

@@ -26,7 +26,7 @@ Requires-Dist: tabulate
 Requires-Dist: jupyter-client
 Requires-Dist: prompt-toolkit
 Requires-Dist: tokenizers
-Requires-Dist: byzerllm[saas] >=0.1.
+Requires-Dist: byzerllm[saas] >=0.1.150
 Requires-Dist: patch
 Requires-Dist: diff-match-patch
 Requires-Dist: GitPython

@@ -35,6 +35,7 @@ Requires-Dist: anthropic
 Requires-Dist: google-generativeai
 Requires-Dist: protobuf
 Requires-Dist: azure-cognitiveservices-speech
+Requires-Dist: real-agent
 Requires-Dist: python-docx
 Requires-Dist: docx2txt
 Requires-Dist: pdf2image
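The dependency changes raise the byzerllm[saas] floor to 0.1.150 (the old lower bound is truncated in this view) and add real-agent as a new install-time requirement. A minimal stdlib sketch, not part of the package, for checking what the resolver actually installed after upgrading:

# Query installed distribution versions via the standard library (Python 3.8+).
from importlib.metadata import version, PackageNotFoundError

for pkg in ["auto-coder", "byzerllm", "real-agent"]:
    try:
        print(f"{pkg}: {version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg}: not installed")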
{auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/RECORD CHANGED

@@ -1,17 +1,17 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/auto_coder.py,sha256=
+autocoder/auto_coder.py,sha256=TGauh4UJQoPTL8rplB_DzziXnVw37A6-vs0TpT6VtVA,57054
 autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
 autocoder/auto_coder_rag.py,sha256=illKgzP2bv-Tq50ujsofJnOHdI4pzr0ALtfR8NHHWdQ,22351
 autocoder/auto_coder_rag_client_mcp.py,sha256=WV7j5JUiQge0x4-B7Hp5-pSAFXLbvLpzQMcCovbauIM,6276
 autocoder/auto_coder_rag_mcp.py,sha256=-RrjNwFaS2e5v8XDIrKR-zlUNUE8UBaeOtojffBrvJo,8521
 autocoder/auto_coder_server.py,sha256=XU9b4SBH7zjPPXaTWWHV4_zJm-XYa6njuLQaplYJH_c,20290
 autocoder/benchmark.py,sha256=Ypomkdzd1T3GE6dRICY3Hj547dZ6_inqJbBJIp5QMco,4423
-autocoder/chat_auto_coder.py,sha256=
+autocoder/chat_auto_coder.py,sha256=eufxzEhopHzLPZUhE8epP5d3JVvcUWXrl_DdSh2-Sfs,101889
 autocoder/chat_auto_coder_lang.py,sha256=YJsFi8an0Kjbo9X7xKZfpdbHS3rbhrvChZNjWqEQ5Sw,11032
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
-autocoder/models.py,sha256=
-autocoder/version.py,sha256=
+autocoder/models.py,sha256=YIvJZJ_Vrmff9mLyDuHvo8zd4YOnNP3v0uYG8oSu7ZM,5162
+autocoder/version.py,sha256=sfyghLRAQ6aSLULneoCBX_LJssf68smbuYMGOKVEtDI,24
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635

@@ -129,9 +129,11 @@ autocoder/utils/request_event_queue.py,sha256=r3lo5qGsB1dIjzVQ05dnr0z_9Z3zOkBdP1
 autocoder/utils/request_queue.py,sha256=nwp6PMtgTCiuwJI24p8OLNZjUiprC-TsefQrhMI-yPE,3889
 autocoder/utils/rest.py,sha256=opE_kBEdNQdxh350M5lUTMk5TViRfpuKP_qWc0B1lks,8861
 autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
-
-
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
-auto_coder-0.1.
+autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=pBOyWa1qwCcsAag1XsLIeTMv_D4QN4ppGo5jFiKzIkE,4165
+auto_coder-0.1.229.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.229.dist-info/METADATA,sha256=TAvwuRtI5_rO2QiAbGV_ug6L6bgkj8Jkj1p_p_zjz88,2641
+auto_coder-0.1.229.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.229.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.229.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.229.dist-info/RECORD,,
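Each RECORD row has the form path,sha256=<digest>,<size>, where the digest is the unpadded URL-safe base64 SHA-256 of the file, per the wheel spec. A small standalone sketch to recompute an entry locally (the path argument is illustrative):

# Recompute a wheel RECORD entry for a file on disk.
import base64
import hashlib

def record_entry(path: str) -> str:
    with open(path, "rb") as f:
        data = f.read()
    # Unpadded URL-safe base64 of the SHA-256 digest, as wheel RECORD requires.
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

print(record_entry("autocoder/version.py"))  # compare against the RECORD line above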
autocoder/auto_coder.py CHANGED

@@ -42,6 +42,7 @@ from rich.live import Live
 from autocoder.auto_coder_lang import get_message
 from autocoder.common.memory_manager import save_to_memory_file
 from autocoder import models as models_module
+from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
 
 console = Console()
 

@@ -1257,64 +1258,11 @@ def main(input_args: Optional[List[str]] = None):
             v = chat_llm.stream_chat_oai(
                 conversations=loaded_conversations, delta_mode=True
             )
-
-            assistant_response = ""
-
-
-
-            with Live(
-                Panel("", title="Response", border_style="green", expand=False),
-                refresh_per_second=4,
-                auto_refresh=True,
-                vertical_overflow="visible",
-                console=Console(force_terminal=True, color_system="auto", height=None)
-            ) as live:
-                for res in v:
-                    markdown_content += res[0]
-                    assistant_response += res[0]
-                    if args.request_id:
-                        request_queue.add_request(
-                            args.request_id,
-                            RequestValue(
-                                value=StreamValue(value=[res[0]]),
-                                status=RequestOption.RUNNING,
-                            ),
-                        )
-                    live.update(
-                        Panel(
-                            Markdown(markdown_content),
-                            title="Response",
-                            border_style="green",
-                            expand=False,
-                        )
-                    )
-                live.update(
-                    Panel(
-                        Markdown(markdown_content),
-                        title="Response",
-                        border_style="green",
-                        expand=False,
-                    )
-                )
-        except Exception as e:
-            ##MARK
-            console.print(Panel(
-                f"Error: {str(e)}",
-                title="Error",
-                border_style="red"
-            ))
-            request_queue.add_request(
-                args.request_id,
-                RequestValue(
-                    value=StreamValue(value=[str(e)]), status=RequestOption.FAILED
-                ),
-            )
-        finally:
-            request_queue.add_request(
-                args.request_id,
-                RequestValue(
-                    value=StreamValue(value=[""]), status=RequestOption.COMPLETED
-                ),
+
+            assistant_response, last_meta = stream_out(
+                v,
+                request_id=args.request_id,
+                console=console
             )
 
             chat_history["ask_conversation"].append(
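The hunk above collapses roughly sixty lines of inline Rich Live rendering, error handling, and request-queue bookkeeping into a single call to the new stream_out helper. The call site relies on a generator of (delta_text, metadata) tuples, which is the shape stream_chat_oai yields in delta mode. A stub generator, with hypothetical metadata keys, makes that contract explicit:

# fake_stream is a hypothetical stand-in used only to show the expected shape.
from typing import Any, Dict, Iterator, Tuple

def fake_stream() -> Iterator[Tuple[str, Dict[str, Any]]]:
    for chunk in ["Hello", ", ", "world"]:
        yield chunk, {"finish_reason": None}  # metadata keys are illustrative

# stream_out consumes this exactly like the real LLM stream:
# assistant_response, last_meta = stream_out(fake_stream(), request_id=None, console=console)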
autocoder/chat_auto_coder.py CHANGED

@@ -2173,7 +2173,8 @@ def manage_models(params, query: str):
         "model_name": data_dict.get("model_name", data_dict["name"]),
         "base_url": data_dict.get("base_url", "https://api.openai.com/v1"),
         "api_key_path": data_dict.get("api_key_path", "api.openai.com"),
-        "description": data_dict.get("description", "")
+        "description": data_dict.get("description", ""),
+        "is_reasoning": data_dict.get("is_reasoning", "false") in ["true", "True", "TRUE", "1"]
     }
 
     models_data.append(final_model)

@@ -2476,6 +2477,9 @@ def lib_command(args: List[str]):
     else:
         console.print(f"Unknown subcommand: {subcommand}")
 
+def agent(query: str):
+    console.print(f"Agent query: {query}")
+
 
 def main():
     ARGS = parse_arguments()

@@ -2650,14 +2654,20 @@ def main():
             if not query:
                 print("Please enter your query.")
             else:
-                manage_models(ARGS,query)
+                manage_models(ARGS,query)
+        elif user_input.startswith("/agent"):
+            query = user_input[len("/agent"):].strip()
+            if not query:
+                print("Please enter your query.")
+            else:
+                agent(query)
 
         elif user_input.startswith("/mode"):
             conf = user_input[len("/mode"):].strip()
             if not conf:
                 print(memory["mode"])
             else:
-                memory["mode"] = conf
+                memory["mode"] = conf
 
         elif user_input.startswith("/conf"):
             conf = user_input[len("/conf"):].strip()
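The is_reasoning field in manage_models arrives as a user-typed string, and the membership test above maps only the four literal values "true", "True", "TRUE", and "1" to True; anything else, including "yes" or an actual boolean, falls through to False. Restating the test standalone shows that behavior:

# Standalone restatement of the membership test used in manage_models.
def parse_is_reasoning(value) -> bool:
    return value in ["true", "True", "TRUE", "1"]

for raw in ["true", "TRUE", "1", "false", "yes", True]:
    # Note: a real bool True compares unequal to every string, so it yields False.
    print(repr(raw), "->", parse_is_reasoning(raw))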
autocoder/models.py CHANGED

@@ -13,7 +13,8 @@ default_models_list = [
         "model_name": "deepseek-reasoner",
         "model_type": "saas/openai",
         "base_url": "https://api.deepseek.com/v1",
-        "api_key_path": "api.deepseek.com"
+        "api_key_path": "api.deepseek.com",
+        "is_reasoning": True
     },
     {
         "name": "deepseek_chat",

@@ -21,7 +22,8 @@ default_models_list = [
         "model_name": "deepseek-chat",
         "model_type": "saas/openai",
         "base_url": "https://api.deepseek.com/v1",
-        "api_key_path": "api.deepseek.com"
+        "api_key_path": "api.deepseek.com",
+        "is_reasoning": False
     },
     {
         "name": "o1",

@@ -29,7 +31,8 @@ default_models_list = [
         "model_name": "o1-2024-12-17",
         "model_type": "saas/openai",
         "base_url": "https://api.openai.com/v1",
-        "api_key_path": ""
+        "api_key_path": "",
+        "is_reasoning": False
     }
 ]
 

@@ -123,7 +126,7 @@ def update_model_with_api_key(name: str, api_key: str) -> Dict:
     # Look for the model among the existing models
     found_model = None
     for model in models:
-        if model["name"] == name:
+        if model["name"] == name.strip():
            found_model = model
            break
 

@@ -140,7 +143,7 @@ def update_model_with_api_key(name: str, api_key: str) -> Dict:
     os.makedirs(api_key_dir, exist_ok=True)
     api_key_file = os.path.join(api_key_dir, api_key_path)
     with open(api_key_file, "w") as f:
-        f.write(api_key)
+        f.write(api_key.strip())
 
     # If it is a new model, add it to the model list
     if all(model["name"] != name for model in models):
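Both .strip() additions guard against copy-paste whitespace: a model name with a stray space would miss the lookup, and a pasted API key usually carries a trailing newline that would otherwise be written to disk verbatim and later sent inside an auth header. A quick illustration with placeholder values:

# Placeholder inputs simulating a sloppy paste into the CLI.
pasted_name = " deepseek_chat\n"
pasted_key = "sk-xxxx\n"  # placeholder, not a real key

models = [{"name": "deepseek_chat"}]
print(any(m["name"] == pasted_name for m in models))          # False: lookup misses
print(any(m["name"] == pasted_name.strip() for m in models))  # True: lookup hits
print(repr(pasted_key), "->", repr(pasted_key.strip()))       # trailing newline removed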
autocoder/utils/auto_coder_utils/__init__.py ADDED (empty file, no content to show)

autocoder/utils/auto_coder_utils/chat_stream_out.py ADDED

@@ -0,0 +1,120 @@
+from rich.console import Console
+from rich.live import Live
+from rich.panel import Panel
+from rich.markdown import Markdown
+from typing import Generator, List, Dict, Any, Optional, Tuple
+from autocoder.utils.request_queue import RequestValue, RequestOption, StreamValue
+from autocoder.utils.request_queue import request_queue
+
+MAX_HISTORY_LINES = 40  # maximum number of history lines to keep
+
+def stream_out(
+    stream_generator: Generator[Tuple[str, Dict[str, Any]], None, None],
+    request_id: Optional[str] = None,
+    console: Optional[Console] = None
+) -> Tuple[str, Optional[Dict[str, Any]]]:
+    """
+    Consume streamed output events and render them in the terminal.
+
+    Args:
+        stream_generator: generator that yields the streamed output
+        request_id: request ID, used to update the request queue
+        console: Rich Console object
+
+    Returns:
+        Tuple[str, Dict[str, Any]]: the full response content and the last metadata
+    """
+    if console is None:
+        console = Console(force_terminal=True, color_system="auto", height=None)
+
+    lines_buffer = []  # stores the history lines
+    current_line = ""  # the line currently being filled
+    assistant_response = ""
+    last_meta = None
+
+    try:
+        with Live(
+            Panel("", title="Response", border_style="green"),
+            refresh_per_second=4,
+            console=console
+        ) as live:
+            for res in stream_generator:
+                last_meta = res[1]
+                content = res[0]
+                assistant_response += content
+
+                # Split the accumulated text into lines
+                parts = (current_line + content).split("\n")
+
+                # The last part is the still-unfinished line
+                if len(parts) > 1:
+                    # Move the completed lines into the buffer
+                    lines_buffer.extend(parts[:-1])
+                    # Enforce the maximum history size
+                    if len(lines_buffer) > MAX_HISTORY_LINES:
+                        del lines_buffer[0:len(lines_buffer) - MAX_HISTORY_LINES]
+
+                # Update the current line
+                current_line = parts[-1]
+
+                # Displayed content = history lines + current line
+                display_content = "\n".join(lines_buffer[-MAX_HISTORY_LINES:] + [current_line])
+
+                if request_id and request_queue:
+                    request_queue.add_request(
+                        request_id,
+                        RequestValue(
+                            value=StreamValue(value=[content]),
+                            status=RequestOption.RUNNING,
+                        ),
+                    )
+
+                live.update(
+                    Panel(
+                        Markdown(display_content),
+                        title="Response",
+                        border_style="green",
+                        height=min(50, live.console.height - 4)
+                    )
+                )
+
+            # Flush the content of the last line
+            if current_line:
+                lines_buffer.append(current_line)
+
+            # Render the final result
+            live.update(
+                Panel(
+                    Markdown(assistant_response),
+                    title="Final Response",
+                    border_style="blue"
+                )
+            )
+
+    except Exception as e:
+        console.print(Panel(
+            f"Error: {str(e)}",
+            title="Error",
+            border_style="red"
+        ))
+
+        if request_id and request_queue:
+            request_queue.add_request(
+                request_id,
+                RequestValue(
+                    value=StreamValue(value=[str(e)]),
+                    status=RequestOption.FAILED
+                ),
+            )
+
+    finally:
+        if request_id and request_queue:
+            request_queue.add_request(
+                request_id,
+                RequestValue(
+                    value=StreamValue(value=[""]),
+                    status=RequestOption.COMPLETED
+                ),
+            )
+
+    return assistant_response, last_meta
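A minimal driver for the new helper, assuming auto-coder 0.1.229 is installed; the generator simulates the (delta_text, metadata) tuples an LLM stream yields, and the metadata contents are illustrative:

# Drive stream_out with a synthetic stream instead of a live LLM.
import time
from rich.console import Console
from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out

def demo_stream():
    for word in ["# Title\n", "Streaming ", "markdown ", "output."]:
        time.sleep(0.2)  # mimic network latency
        yield word, {"done": False}  # metadata dict is illustrative

# request_id=None skips the request-queue updates entirely.
full_text, last_meta = stream_out(demo_stream(), request_id=None, console=Console())
print(len(full_text), last_meta)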
autocoder/version.py CHANGED

@@ -1 +1 @@
-__version__ = "0.1.228"
+__version__ = "0.1.229"
{auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/LICENSE (file without changes)
{auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/WHEEL (file without changes)
{auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/entry_points.txt (file without changes)
{auto_coder-0.1.228.dist-info → auto_coder-0.1.229.dist-info}/top_level.txt (file without changes)