auto-coder 0.1.398__py3-none-any.whl → 0.1.399__py3-none-any.whl
This diff shows the changes between package versions as they were published to their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of auto-coder might be problematic.
- auto_coder-0.1.399.dist-info/METADATA +396 -0
- {auto_coder-0.1.398.dist-info → auto_coder-0.1.399.dist-info}/RECORD +62 -28
- {auto_coder-0.1.398.dist-info → auto_coder-0.1.399.dist-info}/WHEEL +1 -1
- {auto_coder-0.1.398.dist-info → auto_coder-0.1.399.dist-info}/entry_points.txt +2 -0
- autocoder/agent/base_agentic/base_agent.py +2 -2
- autocoder/agent/base_agentic/tools/replace_in_file_tool_resolver.py +1 -1
- autocoder/agent/entry_command_agent/__init__.py +29 -0
- autocoder/agent/entry_command_agent/auto_tool.py +61 -0
- autocoder/agent/entry_command_agent/chat.py +475 -0
- autocoder/agent/entry_command_agent/designer.py +53 -0
- autocoder/agent/entry_command_agent/generate_command.py +50 -0
- autocoder/agent/entry_command_agent/project_reader.py +58 -0
- autocoder/agent/entry_command_agent/voice2text.py +71 -0
- autocoder/auto_coder.py +23 -548
- autocoder/auto_coder_runner.py +510 -8
- autocoder/chat/rules_command.py +1 -1
- autocoder/chat_auto_coder.py +6 -1
- autocoder/common/ac_style_command_parser/__init__.py +15 -0
- autocoder/common/ac_style_command_parser/example.py +7 -0
- autocoder/{command_parser.py → common/ac_style_command_parser/parser.py} +1 -33
- autocoder/common/ac_style_command_parser/test_parser.py +516 -0
- autocoder/common/command_completer_v2.py +1 -1
- autocoder/common/command_file_manager/examples.py +22 -8
- autocoder/common/command_file_manager/manager.py +37 -6
- autocoder/common/conversations/get_conversation_manager.py +143 -0
- autocoder/common/conversations/manager.py +122 -11
- autocoder/common/conversations/storage/index_manager.py +89 -0
- autocoder/common/v2/agent/agentic_edit.py +131 -18
- autocoder/common/v2/agent/agentic_edit_types.py +10 -0
- autocoder/common/v2/code_auto_generate_editblock.py +10 -2
- autocoder/dispacher/__init__.py +10 -0
- autocoder/rags.py +0 -27
- autocoder/run_context.py +1 -0
- autocoder/sdk/__init__.py +188 -0
- autocoder/sdk/cli/__init__.py +15 -0
- autocoder/sdk/cli/__main__.py +26 -0
- autocoder/sdk/cli/completion_wrapper.py +38 -0
- autocoder/sdk/cli/formatters.py +211 -0
- autocoder/sdk/cli/handlers.py +174 -0
- autocoder/sdk/cli/install_completion.py +301 -0
- autocoder/sdk/cli/main.py +284 -0
- autocoder/sdk/cli/options.py +72 -0
- autocoder/sdk/constants.py +102 -0
- autocoder/sdk/core/__init__.py +20 -0
- autocoder/sdk/core/auto_coder_core.py +867 -0
- autocoder/sdk/core/bridge.py +497 -0
- autocoder/sdk/example.py +0 -0
- autocoder/sdk/exceptions.py +72 -0
- autocoder/sdk/models/__init__.py +19 -0
- autocoder/sdk/models/messages.py +209 -0
- autocoder/sdk/models/options.py +194 -0
- autocoder/sdk/models/responses.py +311 -0
- autocoder/sdk/session/__init__.py +32 -0
- autocoder/sdk/session/session.py +106 -0
- autocoder/sdk/session/session_manager.py +56 -0
- autocoder/sdk/utils/__init__.py +24 -0
- autocoder/sdk/utils/formatters.py +216 -0
- autocoder/sdk/utils/io_utils.py +302 -0
- autocoder/sdk/utils/validators.py +287 -0
- autocoder/version.py +2 -1
- auto_coder-0.1.398.dist-info/METADATA +0 -111
- autocoder/common/conversations/compatibility.py +0 -303
- autocoder/common/conversations/conversation_manager.py +0 -502
- autocoder/common/conversations/example.py +0 -152
- {auto_coder-0.1.398.dist-info → auto_coder-0.1.399.dist-info/licenses}/LICENSE +0 -0
- {auto_coder-0.1.398.dist-info → auto_coder-0.1.399.dist-info}/top_level.txt +0 -0
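The bulk of this release is a refactor: the chat, auto_tool, designer, generate_command, project_reader, and voice2text entry commands move out of autocoder/auto_coder.py (+23 -548) into the new autocoder/agent/entry_command_agent package, one module per command. As the diffs below show, every new agent class has the same shape: a constructor taking (args, llm, raw_args) and a run() method. Below is a minimal sketch of the table-driven dispatch that this uniform interface allows; the import path and class names come from the diffs, while the dispatcher function itself is illustrative and not code from the package:

from autocoder.agent.entry_command_agent import (
    AutoToolAgent,
    ChatAgent,
    DesignerAgent,
    GenerateCommandAgent,
    ProjectReaderAgent,
    Voice2TextAgent,
)

# One class per entry command; all are constructed and run the same way.
ENTRY_COMMAND_AGENTS = {
    "auto_tool": AutoToolAgent,
    "chat": ChatAgent,
    "designer": DesignerAgent,
    "generate_command": GenerateCommandAgent,
    "project_reader": ProjectReaderAgent,
    "voice2text": Voice2TextAgent,
}

def dispatch(args, llm, raw_args):
    # Mirrors each elif branch in the refactored main(): look up the agent
    # class for raw_args.agent_command, instantiate it, and run it.
    agent_cls = ENTRY_COMMAND_AGENTS[raw_args.agent_command]
    agent_cls(args, llm, raw_args).run()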
autocoder/agent/entry_command_agent/voice2text.py
ADDED
@@ -0,0 +1,71 @@
+
+
+
+import os
+import tempfile
+from rich.console import Console
+from rich.panel import Panel
+
+from autocoder.utils.request_queue import (
+    request_queue,
+    RequestValue,
+    DefaultValue,
+    RequestOption,
+)
+
+
+class Voice2TextAgent:
+    def __init__(self, args, llm, raw_args):
+        self.args = args
+        self.llm = llm
+        self.raw_args = raw_args
+        self.console = Console()
+
+    def run(self):
+        """Main logic for the voice2text command."""
+        from autocoder.common.audio import TranscribeAudio
+
+        transcribe_audio = TranscribeAudio()
+        temp_wav_file = os.path.join(
+            tempfile.gettempdir(), "voice_input.wav")
+
+        transcribe_audio.record_audio(temp_wav_file)
+        self.console.print(
+            Panel(
+                "Recording finished. Transcribing...",
+                title="Voice",
+                border_style="green",
+            )
+        )
+
+        if self.llm and self.llm.get_sub_client("voice2text_model"):
+            voice2text_llm = self.llm.get_sub_client("voice2text_model")
+        else:
+            voice2text_llm = self.llm
+
+        transcription = transcribe_audio.transcribe_audio(
+            temp_wav_file, voice2text_llm
+        )
+
+        self.console.print(
+            Panel(
+                f"Transcription: <_transcription_>{transcription}</_transcription_>",
+                title="Result",
+                border_style="magenta",
+            )
+        )
+
+        with open(os.path.join(".auto-coder", "exchange.txt"), "w", encoding="utf-8") as f:
+            f.write(transcription)
+
+        request_queue.add_request(
+            self.args.request_id,
+            RequestValue(
+                value=DefaultValue(value=transcription),
+                status=RequestOption.COMPLETED,
+            ),
+        )
+
+        os.remove(temp_wav_file)
+
+
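Note how the result leaves Voice2TextAgent.run(): the transcription is written to .auto-coder/exchange.txt and also published on request_queue under args.request_id. A minimal consumer sketch that picks the result up from the exchange file; only the file path comes from the diff above, the rest is illustrative:

import os

# Path written by Voice2TextAgent.run() in the diff above.
exchange_path = os.path.join(".auto-coder", "exchange.txt")

if os.path.exists(exchange_path):
    with open(exchange_path, "r", encoding="utf-8") as f:
        transcription = f.read()
    print(transcription)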
autocoder/auto_coder.py
CHANGED
@@ -893,566 +893,41 @@ def main(input_args: Optional[List[str]] = None):
         # )
         return
     elif raw_args.agent_command == "project_reader":
-
-
-
-
-        model_filter = ModelPathFilter.from_model_object(target_llm, args)
-        if model_filter.has_rules():
-            printer = Printer()
-            msg = printer.get_message_from_key_with_format("model_has_access_restrictions",
-                                                           model_name=",".join(get_llm_names(target_llm)))
-            raise ValueError(msg)
-
-        from autocoder.agent.project_reader import ProjectReader
-
-        project_reader = ProjectReader(args, llm)
-        v = project_reader.run(args.query)
-        console = Console()
-        markdown_content = v
-
-        with Live(
-            Panel("", title="Response", border_style="green", expand=False),
-            refresh_per_second=4,
-            auto_refresh=True,
-            vertical_overflow="visible",
-            console=Console(force_terminal=True, color_system="auto", height=None)
-        ) as live:
-            live.update(
-                Panel(
-                    Markdown(markdown_content),
-                    title="Response",
-                    border_style="green",
-                    expand=False,
-                )
-            )
-
+        from autocoder.agent.entry_command_agent import ProjectReaderAgent
+
+        project_reader_agent = ProjectReaderAgent(args, llm, raw_args)
+        project_reader_agent.run()
         return
     elif raw_args.agent_command == "voice2text":
-        from autocoder.
-
-
-
-        temp_wav_file = os.path.join(
-            tempfile.gettempdir(), "voice_input.wav")
-
-        console = Console()
-
-        transcribe_audio.record_audio(temp_wav_file)
-        console.print(
-            Panel(
-                "Recording finished. Transcribing...",
-                title="Voice",
-                border_style="green",
-            )
-        )
-
-        if llm and llm.get_sub_client("voice2text_model"):
-            voice2text_llm = llm.get_sub_client("voice2text_model")
-        else:
-            voice2text_llm = llm
-        transcription = transcribe_audio.transcribe_audio(
-            temp_wav_file, voice2text_llm
-        )
-
-        console.print(
-            Panel(
-                f"Transcription: <_transcription_>{transcription}</_transcription_>",
-                title="Result",
-                border_style="magenta",
-            )
-        )
-
-        with open(os.path.join(".auto-coder", "exchange.txt"), "w",encoding="utf-8") as f:
-            f.write(transcription)
-
-        request_queue.add_request(
-            args.request_id,
-            RequestValue(
-                value=DefaultValue(value=transcription),
-                status=RequestOption.COMPLETED,
-            ),
-        )
-
-        os.remove(temp_wav_file)
+        from autocoder.agent.entry_command_agent import Voice2TextAgent
+
+        voice2text_agent = Voice2TextAgent(args, llm, raw_args)
+        voice2text_agent.run()
         return
     elif raw_args.agent_command == "generate_command":
-        from autocoder.
-
-
-
-        shell_script = generate_shell_script(args, llm)
-
-        console.print(
-            Panel(
-                shell_script,
-                title="Shell Script",
-                border_style="magenta",
-            )
-        )
-
-        with open(os.path.join(".auto-coder", "exchange.txt"), "w",encoding="utf-8") as f:
-            f.write(shell_script)
-
-        request_queue.add_request(
-            args.request_id,
-            RequestValue(
-                value=DefaultValue(value=shell_script),
-                status=RequestOption.COMPLETED,
-            ),
-        )
-
+        from autocoder.agent.entry_command_agent import GenerateCommandAgent
+
+        generate_command_agent = GenerateCommandAgent(args, llm, raw_args)
+        generate_command_agent.run()
         return
     elif raw_args.agent_command == "auto_tool":
-        from autocoder.agent.
-
-
-
-        if args.request_id:
-            request_queue.add_request(
-                args.request_id,
-                RequestValue(
-                    value=DefaultValue(value=v), status=RequestOption.COMPLETED
-                ),
-            )
-        console = Console()
-        markdown_content = v
-
-        with Live(
-            Panel("", title="Response", border_style="green", expand=False),
-            refresh_per_second=4,
-            auto_refresh=True,
-            vertical_overflow="visible",
-            console=Console(force_terminal=True, color_system="auto", height=None)
-        ) as live:
-            live.update(
-                Panel(
-                    Markdown(markdown_content),
-                    title="Response",
-                    border_style="green",
-                    expand=False,
-                )
-            )
-
+        from autocoder.agent.entry_command_agent import AutoToolAgent
+
+        auto_tool_agent = AutoToolAgent(args, llm, raw_args)
+        auto_tool_agent.run()
         return
     elif raw_args.agent_command == "designer":
-        from autocoder.agent.
-
-
-
-            designer.run(args.query)
-            print("Successfully generated image in output.png")
-        elif args.agent_designer_mode == "sd":
-            designer = SDDesigner(args, llm)
-            designer.run(args.query)
-            print("Successfully generated image in output.jpg")
-        elif args.agent_designer_mode.startswith("logo"):
-            designer = LogoDesigner(args, llm)
-            designer.run(args.query)
-            print("Successfully generated image in output.png")
-        if args.request_id:
-            request_queue.add_request(
-                args.request_id,
-                RequestValue(
-                    value=DefaultValue(
-                        value="Successfully generated image"),
-                    status=RequestOption.COMPLETED,
-                ),
-            )
+        from autocoder.agent.entry_command_agent import DesignerAgent
+
+        designer_agent = DesignerAgent(args, llm, raw_args)
+        designer_agent.run()
         return

     elif raw_args.agent_command == "chat":
+        from autocoder.agent.entry_command_agent import ChatAgent

-
-
-        if isinstance(args.action, dict):
-            commands_info = args.action
-        else:
-            commands_info = {}
-            for command in args.action:
-                commands_info[command] = {}
-
-        memory_dir = os.path.join(args.source_dir, ".auto-coder", "memory")
-        os.makedirs(memory_dir, exist_ok=True)
-        memory_file = os.path.join(memory_dir, "chat_history.json")
-        console = Console()
-        result_manager = ResultManager()
-        if args.new_session:
-            if os.path.exists(memory_file):
-                with open(memory_file, "r",encoding="utf-8") as f:
-                    old_chat_history = json.load(f)
-                if "conversation_history" not in old_chat_history:
-                    old_chat_history["conversation_history"] = []
-                old_chat_history["conversation_history"].append(
-                    old_chat_history.get("ask_conversation", []))
-                chat_history = {"ask_conversation": [
-                ], "conversation_history": old_chat_history["conversation_history"]}
-            else:
-                chat_history = {"ask_conversation": [],
-                                "conversation_history": []}
-            with open(memory_file, "w",encoding="utf-8") as f:
-                json.dump(chat_history, f, ensure_ascii=False)
-
-            result_manager.add_result(content=get_message("new_session_started"), meta={
-                "action": "chat",
-                "input": {
-                    "query": args.query
-                }
-            })
-            console.print(
-                Panel(
-                    get_message("new_session_started"),
-                    title="Session Status",
-                    expand=False,
-                    border_style="green",
-                )
-            )
-            if not args.query or (args.query_prefix and args.query == args.query_prefix) or (args.query_suffix and args.query == args.query_suffix):
-                return
-
-        if os.path.exists(memory_file):
-            with open(memory_file, "r",encoding="utf-8") as f:
-                chat_history = json.load(f)
-            if "conversation_history" not in chat_history:
-                chat_history["conversation_history"] = []
-        else:
-            chat_history = {"ask_conversation": [],
-                            "conversation_history": []}
-
-        chat_history["ask_conversation"].append(
-            {"role": "user", "content": args.query}
-        )
-
-        if llm.get_sub_client("chat_model"):
-            chat_llm = llm.get_sub_client("chat_model")
-        else:
-            chat_llm = llm
-
-        source_count = 0
-        pre_conversations = []
-        context_content = args.context if args.context else ""
-        if args.context:
-            try:
-                context = json.loads(args.context)
-                if "file_content" in context:
-                    context_content = context["file_content"]
-            except:
-                pass
-
-            pre_conversations.append(
-                {
-                    "role": "user",
-                    "content": f"请阅读下面的代码和文档:\n\n <files>\n{context_content}\n</files>",
-                },
-            )
-            pre_conversations.append(
-                {"role": "assistant", "content": "read"})
-            source_count += 1
-
-        from autocoder.index.index import IndexManager
-        from autocoder.index.entry import build_index_and_filter_files
-        from autocoder.pyproject import PyProject
-        from autocoder.tsproject import TSProject
-        from autocoder.suffixproject import SuffixProject
-
-        if args.project_type == "ts":
-            pp = TSProject(args=args, llm=llm)
-        elif args.project_type == "py":
-            pp = PyProject(args=args, llm=llm)
-        else:
-            pp = SuffixProject(args=args, llm=llm, file_filter=None)
-        pp.run()
-        sources = pp.sources
-
-        # Apply model filter for chat_llm
-        model_filter = ModelPathFilter.from_model_object(chat_llm, args)
-        filtered_sources = []
-        printer = Printer()
-        for source in sources:
-            if model_filter.is_accessible(source.module_name):
-                filtered_sources.append(source)
-            else:
-                printer.print_in_terminal("index_file_filtered",
-                                          style="yellow",
-                                          file_path=source.module_name,
-                                          model_name=",".join(get_llm_names(chat_llm)))
-
-        if "no_context" not in commands_info:
-            s = build_index_and_filter_files(
-                llm=llm, args=args, sources=filtered_sources).to_str()
-
-            if s:
-                pre_conversations.append(
-                    {
-                        "role": "user",
-                        "content": f"请阅读下面的代码和文档:\n\n <files>\n{s}\n</files>",
-                    }
-                )
-                pre_conversations.append(
-                    {"role": "assistant", "content": "read"})
-                source_count += 1
-
-        loaded_conversations = pre_conversations + \
-            chat_history["ask_conversation"]
-
-        if get_run_context().mode != RunMode.WEB and args.human_as_model:
-            console = Console()
-
-            @byzerllm.prompt()
-            def chat_with_human_as_model(
-                source_codes, pre_conversations, last_conversation
-            ):
-                """
-                {% if source_codes %}
-                {{ source_codes }}
-                {% endif %}
-
-                {% if pre_conversations %}
-                下面是我们之间的历史对话,假设我是A,你是B。
-                <conversations>
-                {% for conv in pre_conversations %}
-                {{ "A" if conv.role == "user" else "B" }}: {{ conv.content }}
-                {% endfor %}
-                </conversations>
-                {% endif %}
-
-
-                参考上面的文件以及历史对话,回答用户的问题。
-                用户的问题: {{ last_conversation.content }}
-                """
-
-            source_codes_conversations = loaded_conversations[0: source_count * 2]
-            source_codes = ""
-            for conv in source_codes_conversations:
-                if conv["role"] == "user":
-                    source_codes += conv["content"]
-
-            chat_content = chat_with_human_as_model.prompt(
-                source_codes=source_codes,
-                pre_conversations=loaded_conversations[source_count * 2: -1],
-                last_conversation=loaded_conversations[-1],
-            )
-
-            with open(args.target_file, "w",encoding="utf-8") as f:
-                f.write(chat_content)
-
-            try:
-                import pyperclip
-
-                pyperclip.copy(chat_content)
-                console.print(
-                    Panel(
-                        get_message("chat_human_as_model_instructions"),
-                        title="Instructions",
-                        border_style="blue",
-                        expand=False,
-                    )
-                )
-            except Exception:
-                logger.warning(get_message("clipboard_not_supported"))
-                console.print(
-                    Panel(
-                        get_message(
-                            "human_as_model_instructions_no_clipboard"),
-                        title="Instructions",
-                        border_style="blue",
-                        expand=False,
-                    )
-                )
-
-            lines = []
-            while True:
-                line = prompt(FormattedText(
-                    [("#00FF00", "> ")]), multiline=False)
-                line_lower = line.strip().lower()
-                if line_lower in ["eof", "/eof"]:
-                    break
-                elif line_lower in ["/clear"]:
-                    lines = []
-                    print("\033[2J\033[H")  # Clear terminal screen
-                    continue
-                elif line_lower in ["/break"]:
-                    raise Exception(
-                        "User requested to break the operation.")
-                lines.append(line)
-
-            result = "\n".join(lines)
-
-
-            result_manager = ResultManager()
-            result_manager.append(content=result,
-                                  meta={"action": "chat","input":{
-                                      "query":args.query
-                                  }})
-
-            # Update chat history with user's response
-            chat_history["ask_conversation"].append(
-                {"role": "assistant", "content": result}
-            )
-
-            with open(memory_file, "w",encoding="utf-8") as f:
-                json.dump(chat_history, f, ensure_ascii=False)
-
-            if "save" in commands_info:
-                save_to_memory_file(ask_conversation=chat_history["ask_conversation"],
-                                    query=args.query,
-                                    response=result)
-                printer = Printer()
-                printer.print_in_terminal("memory_save_success")
-            return {}
-
-        # Measure elapsed time
-        start_time = time.time()
-        commit_file_name = None
-
-        if "rag" in commands_info:
-            from autocoder.rag.rag_entry import RAGFactory
-            args.enable_rag_search = True
-            args.enable_rag_context = False
-            rag = RAGFactory.get_rag(llm=chat_llm, args=args, path="")
-            response = rag.stream_chat_oai(
-                conversations=loaded_conversations)[0]
-            v = (item for item in response)
-
-        elif "mcp" in commands_info:
-            mcp_server = get_mcp_server()
-
-            pos_args = commands_info["mcp"].get("args", [])
-            final_query = pos_args[0] if pos_args else args.query
-            response = mcp_server.send_request(
-                McpRequest(
-                    query=final_query,
-                    model=args.inference_model or args.model,
-                    product_mode=args.product_mode
-                )
-            )
-            v = [[response.result,None]]
-        elif "review" in commands_info:
-            from autocoder.agent.auto_review_commit import AutoReviewCommit
-            reviewer = AutoReviewCommit(llm=chat_llm, args=args)
-            pos_args = commands_info["review"].get("args", [])
-            final_query = pos_args[0] if pos_args else args.query
-            kwargs = commands_info["review"].get("kwargs", {})
-            commit_id = kwargs.get("commit", None)
-            v = reviewer.review_commit(query=final_query, conversations=loaded_conversations, commit_id=commit_id)
-        elif "learn" in commands_info:
-            from autocoder.agent.auto_learn_from_commit import AutoLearnFromCommit
-            learner = AutoLearnFromCommit(llm=chat_llm, args=args)
-            pos_args = commands_info["learn"].get("args", [])
-            final_query = pos_args[0] if pos_args else args.query
-            v,tmp_file_name = learner.learn_from_commit(query=final_query,conversations=loaded_conversations)
-            commit_file_name = tmp_file_name
-        else:
-            # Estimate the input token count
-            dumped_conversations = json.dumps(loaded_conversations, ensure_ascii=False)
-            estimated_input_tokens = count_tokens(dumped_conversations)
-            printer = Printer()
-            printer.print_in_terminal("estimated_chat_input_tokens", style="yellow",
-                                      estimated_input_tokens=estimated_input_tokens
-                                      )
-
-            # with open("/tmp/output.txt", "w",encoding="utf-8") as f:
-            #     f.write(json.dumps(loaded_conversations, ensure_ascii=False, indent=4))
-
-            v = stream_chat_with_continue(
-                llm=chat_llm,
-                conversations=loaded_conversations,
-                llm_config={},
-                args=args
-            )
-
-
-        model_name = ",".join(get_llm_names(chat_llm))
-
-        assistant_response, last_meta = stream_out(
-            v,
-            request_id=args.request_id,
-            console=console,
-            model_name=model_name,
-            args=args
-        )
-
-        result_manager = ResultManager()
-        result_manager.append(content=assistant_response, meta={
-            "action": "chat",
-            "input": {
-                "query": args.query
-            }
-        })
-
-        if "learn" in commands_info:
-            if commit_file_name:
-                # Use ActionYmlFileManager to update the YAML file
-                action_manager = ActionYmlFileManager(args.source_dir)
-                if not action_manager.update_yaml_field(commit_file_name, 'how_to_reproduce', assistant_response):
-                    printer = Printer()
-                    printer.print_in_terminal("yaml_save_error", style="red", yaml_file=commit_file_name)
-
-        # Print elapsed time and token statistics
-        if last_meta:
-            elapsed_time = time.time() - start_time
-            printer = Printer()
-            speed = last_meta.generated_tokens_count / elapsed_time
-
-            # Get model info for pricing
-            from autocoder.utils import llms as llm_utils
-            model_info = llm_utils.get_model_info(model_name, args.product_mode) or {}
-            input_price = model_info.get("input_price", 0.0) if model_info else 0.0
-            output_price = model_info.get("output_price", 0.0) if model_info else 0.0
-
-            # Calculate costs
-            input_cost = (last_meta.input_tokens_count * input_price) / 1000000  # Convert to millions
-            output_cost = (last_meta.generated_tokens_count * output_price) / 1000000  # Convert to millions
-
-            printer.print_in_terminal("stream_out_stats",
-                                      model_name=model_name,
-                                      elapsed_time=elapsed_time,
-                                      first_token_time=last_meta.first_token_time,
-                                      input_tokens=last_meta.input_tokens_count,
-                                      output_tokens=last_meta.generated_tokens_count,
-                                      input_cost=round(input_cost, 4),
-                                      output_cost=round(output_cost, 4),
-                                      speed=round(speed, 2))
-            get_event_manager(args.event_file).write_result(
-                EventContentCreator.create_result(content=EventContentCreator.ResultTokenStatContent(
-                    model_name=model_name,
-                    elapsed_time=elapsed_time,
-                    input_tokens=last_meta.input_tokens_count,
-                    output_tokens=last_meta.generated_tokens_count,
-                    input_cost=round(input_cost, 4),
-                    output_cost=round(output_cost, 4),
-                    speed=round(speed, 2)
-                )).to_dict(), metadata=EventMetadata(
-                    action_file=args.file
-                ).to_dict())
-
-
-        chat_history["ask_conversation"].append(
-            {"role": "assistant", "content": assistant_response}
-        )
-
-        with open(memory_file, "w",encoding="utf-8") as f:
-            json.dump(chat_history, f, ensure_ascii=False)
-
-        if "copy" in commands_info:
-            # copy assistant_response to clipboard
-            import pyperclip
-            try:
-                pyperclip.copy(assistant_response)
-            except:
-                print("pyperclip not installed or clipboard is not supported, instruction will not be copied to clipboard.")
-
-        if "save" in commands_info:
-            tmp_dir = save_to_memory_file(ask_conversation=chat_history["ask_conversation"],
-                                          query=args.query,
-                                          response=assistant_response)
-            printer = Printer()
-            printer.print_in_terminal("memory_save_success", style="green", path=tmp_dir)
-
-            if len(commands_info["save"]["args"]) > 0:
-                # Save to the specified file
-                with open(commands_info["save"]["args"][0], "w",encoding="utf-8") as f:
-                    f.write(assistant_response)
+        chat_agent = ChatAgent(args, llm, raw_args)
+        chat_agent.run()
         return

     else: