auto-coder 0.1.231-py3-none-any.whl → 0.1.232-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of auto-coder might be problematic.
- {auto_coder-0.1.231.dist-info → auto_coder-0.1.232.dist-info}/METADATA +1 -1
- {auto_coder-0.1.231.dist-info → auto_coder-0.1.232.dist-info}/RECORD +10 -10
- autocoder/auto_coder.py +7 -4
- autocoder/common/utils_code_auto_generate.py +55 -3
- autocoder/utils/auto_coder_utils/chat_stream_out.py +1 -1
- autocoder/version.py +1 -1
- {auto_coder-0.1.231.dist-info → auto_coder-0.1.232.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.231.dist-info → auto_coder-0.1.232.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.231.dist-info → auto_coder-0.1.232.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.231.dist-info → auto_coder-0.1.232.dist-info}/top_level.txt +0 -0
{auto_coder-0.1.231.dist-info → auto_coder-0.1.232.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
 autocoder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/auto_coder.py,sha256=…
+autocoder/auto_coder.py,sha256=REnZJUy4Dx9b7Z8gM9D4SWMkbHXl0JjF-DFiod4aB-A,58357
 autocoder/auto_coder_lang.py,sha256=Rtupq6N3_HT7JRhDKdgCBcwRaiAnyCOR_Gsp4jUomrI,3229
 autocoder/auto_coder_rag.py,sha256=illKgzP2bv-Tq50ujsofJnOHdI4pzr0ALtfR8NHHWdQ,22351
 autocoder/auto_coder_rag_client_mcp.py,sha256=WV7j5JUiQge0x4-B7Hp5-pSAFXLbvLpzQMcCovbauIM,6276
@@ -11,7 +11,7 @@ autocoder/chat_auto_coder_lang.py,sha256=YJsFi8an0Kjbo9X7xKZfpdbHS3rbhrvChZNjWqE
 autocoder/command_args.py,sha256=9aYJ-AmPxP1sQh6ciw04FWHjSn31f2W9afXFwo8wgx4,30441
 autocoder/lang.py,sha256=U6AjVV8Rs1uLyjFCZ8sT6WWuNUxMBqkXXIOs4S120uk,14511
 autocoder/models.py,sha256=FlBrF6HhGao_RiCSgYhCmP7vs0KlG4hI_BI6dyZiL9s,5292
-autocoder/version.py,sha256=…
+autocoder/version.py,sha256=pvV6K1v2HfuQywwyaRYD8uMohj-o0EfkRauHjMIrYtk,24
 autocoder/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/agent/auto_demand_organizer.py,sha256=NWSAEsEk94vT3lGjfo25kKLMwYdPcpy9e-i21txPasQ,6942
 autocoder/agent/auto_filegroup.py,sha256=CW7bqp0FW1GIEMnl-blyAc2UGT7O9Mom0q66ITz1ckM,6635
@@ -61,7 +61,7 @@ autocoder/common/search_replace.py,sha256=GphFkc57Hb673CAwmbiocqTbw8vrV7TrZxtOhD
 autocoder/common/sys_prompt.py,sha256=JlexfjZt554faqbgkCmzOJqYUzDHfbnxly5ugFfHfEE,26403
 autocoder/common/text.py,sha256=KGRQq314GHBmY4MWG8ossRoQi1_DTotvhxchpn78c-k,1003
 autocoder/common/types.py,sha256=PXTETrsTvhLE49jqAeUKGySvxBN9pjeyCgRHLDYdd9U,664
-autocoder/common/utils_code_auto_generate.py,sha256=…
+autocoder/common/utils_code_auto_generate.py,sha256=QwJYr6QhjlQbSo231IWuI6c8G0kXkYDDoDHMo5NSXqI,3348
 autocoder/common/mcp_servers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 autocoder/common/mcp_servers/mcp_server_perplexity.py,sha256=jz0LkCgZcqKkNdLZ9swNOu9Besoba4JOyHDedoZnWHo,5546
 autocoder/data/tokenizer.json,sha256=QfO_ZCE9qMAS2L0IcaWKH99wRj6PCPEQ3bsQgvUp9mk,4607451
@@ -131,11 +131,11 @@ autocoder/utils/request_queue.py,sha256=nwp6PMtgTCiuwJI24p8OLNZjUiprC-TsefQrhMI-
 autocoder/utils/rest.py,sha256=opE_kBEdNQdxh350M5lUTMk5TViRfpuKP_qWc0B1lks,8861
 autocoder/utils/tests.py,sha256=BqphrwyycGAvs-5mhH8pKtMZdObwhFtJ5MC_ZAOiLq8,1340
 autocoder/utils/auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=…
+autocoder/utils/auto_coder_utils/chat_stream_out.py,sha256=6D_SIa5hHSwIHC1poO_ztK7IVugAqNHu-jQySd7EnfQ,4181
 autocoder/utils/chat_auto_coder_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-auto_coder-0.1.…
-auto_coder-0.1.…
-auto_coder-0.1.…
-auto_coder-0.1.…
-auto_coder-0.1.…
-auto_coder-0.1.…
+auto_coder-0.1.232.dist-info/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+auto_coder-0.1.232.dist-info/METADATA,sha256=5XhWylx2N9N4DNDoJpF3GUWHJzpNVg2wwgBZ5m_rBDU,2641
+auto_coder-0.1.232.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+auto_coder-0.1.232.dist-info/entry_points.txt,sha256=0nzHtHH4pNcM7xq4EBA2toS28Qelrvcbrr59GqD_0Ak,350
+auto_coder-0.1.232.dist-info/top_level.txt,sha256=Jqc0_uJSw2GwoFQAa9iJxYns-2mWla-9ok_Y3Gcznjk,10
+auto_coder-0.1.232.dist-info/RECORD,,
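Each RECORD line has the form path,sha256=<digest>,<size>, where the digest is the urlsafe-base64-encoded SHA-256 of the file with the trailing "=" padding stripped and the size is in bytes (the wheel RECORD convention; RECORD itself carries no hash, hence the trailing ",,"). A minimal Python sketch for recomputing an entry, e.g. to spot-check one of the hashes above — the path is illustrative:

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    # RECORD format: path,sha256=<urlsafe-b64 digest, '=' padding stripped>,<size in bytes>
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# Illustrative: compare against the RECORD line for version.py shipped in the wheel
print(record_entry("autocoder/version.py"))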
autocoder/auto_coder.py
CHANGED
@@ -1272,10 +1272,13 @@ def main(input_args: Optional[List[str]] = None):
                 )
             )
             v = [[response.result,None]]
-        else:
-            …
-            …
-            …
+        else:
+            from autocoder.common.utils_code_auto_generate import stream_chat_with_continue
+            v = stream_chat_with_continue(
+                llm=chat_llm,
+                conversations=loaded_conversations,
+                llm_config={}
+            )
 
         assistant_response, last_meta = stream_out(
             v,
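This change swaps the old else branch for the new streaming helper while keeping the downstream contract intact: both branches hand stream_out an iterable of (content, metadata) pairs — the non-stream branch as the one-element list [[response.result, None]], the stream branch as a live generator. A sketch of a consumer honoring that contract (drain_stream is a hypothetical stand-in; the real stream_out lives in autocoder/utils/auto_coder_utils/chat_stream_out.py and is not shown in this diff):

from typing import Any, Iterable, Optional, Tuple

def drain_stream(v: Iterable[Tuple[str, Optional[Any]]]) -> Tuple[str, Optional[Any]]:
    # Stand-in for stream_out: accumulate delta text, keep the last metadata seen
    parts = []
    last_meta = None
    for content, meta in v:
        parts.append(content)  # a real UI renders each delta as it arrives
        last_meta = meta
    return "".join(parts), last_meta

# The same consumer works for both branches:
assistant_response, last_meta = drain_stream([["hello world", None]])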
autocoder/common/utils_code_auto_generate.py
CHANGED

@@ -1,14 +1,15 @@
-from byzerllm import ByzerLLM
-from typing import List,Any,Union
+from byzerllm import ByzerLLM,SimpleByzerLLM
+from typing import Generator, List, Any, Union
 from pydantic import BaseModel
 from loguru import logger
+
 class ChatWithContinueResult(BaseModel):
     content: str
     input_tokens_count: int
     generated_tokens_count: int
 
 
-def chat_with_continue(llm: ByzerLLM, conversations: List[dict], llm_config: dict) -> ChatWithContinueResult:
+def chat_with_continue(llm: Union[ByzerLLM,SimpleByzerLLM], conversations: List[dict], llm_config: dict) -> ChatWithContinueResult:
     final_result = ChatWithContinueResult(content="", input_tokens_count=0, generated_tokens_count=0)
     v = llm.chat_oai(
         conversations=conversations, llm_config=llm_config)
@@ -36,3 +37,54 @@ def chat_with_continue(llm: ByzerLLM, conversations: List[dict], llm_config: dic
         # logger.info(f"The code generation is exceed the max length, continue to generate the code {count -1 } times")
     final_result.content = single_result
     return final_result
+
+def stream_chat_with_continue(
+    llm: Union[ByzerLLM, SimpleByzerLLM],
+    conversations: List[dict],
+    llm_config: dict
+) -> Generator[Any, None, None]:
+    """
+    Stream the response and keep generating until it is complete.
+
+    Args:
+        llm (Union[ByzerLLM, SimpleByzerLLM]): the LLM instance
+        conversations (List[dict]): the conversation history
+        llm_config (dict): LLM configuration parameters
+
+    """
+
+    count = 0
+    temp_conversations = conversations
+    current_metadata = None
+
+    while True:
+        # Fetch generated content through the streaming interface
+        stream_generator = llm.stream_chat_oai(
+            conversations=temp_conversations,
+            delta_mode=True,
+            llm_config={**llm_config, "gen.response_prefix": True if count > 0 else False}
+        )
+
+        current_content = ""
+        for res in stream_generator:
+            content = res[0]
+            current_content += content
+            if current_metadata is None:
+                current_metadata = res[1]
+            else:
+                current_metadata.generated_tokens_count += res[1].generated_tokens_count
+                current_metadata.input_tokens_count += res[1].input_tokens_count
+                current_metadata.finish_reason = res[1].finish_reason
+
+            # Yield the current StreamChatWithContinueResult
+            yield (content,current_metadata)
+
+        # Update the conversation history
+        temp_conversations.append({"role": "assistant", "content": current_content})
+
+        # Check whether generation needs to continue
+        if current_metadata.finish_reason != "length" or count >= 5:
+            break
+
+        count += 1
autocoder/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.1.231"
+__version__ = "0.1.232"
{auto_coder-0.1.231.dist-info → auto_coder-0.1.232.dist-info}/LICENSE
File without changes

{auto_coder-0.1.231.dist-info → auto_coder-0.1.232.dist-info}/WHEEL
File without changes

{auto_coder-0.1.231.dist-info → auto_coder-0.1.232.dist-info}/entry_points.txt
File without changes

{auto_coder-0.1.231.dist-info → auto_coder-0.1.232.dist-info}/top_level.txt
File without changes