auto-coder 0.1.398__py3-none-any.whl → 0.1.400__py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of auto-coder might be problematic.
- auto_coder-0.1.400.dist-info/METADATA +396 -0
- {auto_coder-0.1.398.dist-info → auto_coder-0.1.400.dist-info}/RECORD +82 -29
- {auto_coder-0.1.398.dist-info → auto_coder-0.1.400.dist-info}/WHEEL +1 -1
- {auto_coder-0.1.398.dist-info → auto_coder-0.1.400.dist-info}/entry_points.txt +2 -0
- autocoder/agent/base_agentic/base_agent.py +2 -2
- autocoder/agent/base_agentic/tools/replace_in_file_tool_resolver.py +1 -1
- autocoder/agent/entry_command_agent/__init__.py +29 -0
- autocoder/agent/entry_command_agent/auto_tool.py +61 -0
- autocoder/agent/entry_command_agent/chat.py +475 -0
- autocoder/agent/entry_command_agent/designer.py +53 -0
- autocoder/agent/entry_command_agent/generate_command.py +50 -0
- autocoder/agent/entry_command_agent/project_reader.py +58 -0
- autocoder/agent/entry_command_agent/voice2text.py +71 -0
- autocoder/auto_coder.py +23 -548
- autocoder/auto_coder_runner.py +511 -8
- autocoder/chat/rules_command.py +1 -1
- autocoder/chat_auto_coder.py +6 -1
- autocoder/common/ac_style_command_parser/__init__.py +15 -0
- autocoder/common/ac_style_command_parser/example.py +7 -0
- autocoder/{command_parser.py → common/ac_style_command_parser/parser.py} +28 -45
- autocoder/common/ac_style_command_parser/test_parser.py +516 -0
- autocoder/common/auto_coder_lang.py +78 -0
- autocoder/common/command_completer_v2.py +1 -1
- autocoder/common/command_file_manager/examples.py +22 -8
- autocoder/common/command_file_manager/manager.py +37 -6
- autocoder/common/conversations/get_conversation_manager.py +143 -0
- autocoder/common/conversations/manager.py +122 -11
- autocoder/common/conversations/storage/index_manager.py +89 -0
- autocoder/common/pull_requests/__init__.py +256 -0
- autocoder/common/pull_requests/base_provider.py +191 -0
- autocoder/common/pull_requests/config.py +66 -0
- autocoder/common/pull_requests/example.py +1 -0
- autocoder/common/pull_requests/exceptions.py +46 -0
- autocoder/common/pull_requests/manager.py +201 -0
- autocoder/common/pull_requests/models.py +164 -0
- autocoder/common/pull_requests/providers/__init__.py +23 -0
- autocoder/common/pull_requests/providers/gitcode_provider.py +19 -0
- autocoder/common/pull_requests/providers/gitee_provider.py +20 -0
- autocoder/common/pull_requests/providers/github_provider.py +214 -0
- autocoder/common/pull_requests/providers/gitlab_provider.py +29 -0
- autocoder/common/pull_requests/test_module.py +1 -0
- autocoder/common/pull_requests/utils.py +344 -0
- autocoder/common/tokens/__init__.py +62 -0
- autocoder/common/tokens/counter.py +211 -0
- autocoder/common/tokens/file_detector.py +105 -0
- autocoder/common/tokens/filters.py +111 -0
- autocoder/common/tokens/models.py +28 -0
- autocoder/common/v2/agent/agentic_edit.py +312 -85
- autocoder/common/v2/agent/agentic_edit_types.py +11 -0
- autocoder/common/v2/code_auto_generate_editblock.py +10 -2
- autocoder/dispacher/__init__.py +10 -0
- autocoder/rags.py +0 -27
- autocoder/run_context.py +1 -0
- autocoder/sdk/__init__.py +188 -0
- autocoder/sdk/cli/__init__.py +15 -0
- autocoder/sdk/cli/__main__.py +26 -0
- autocoder/sdk/cli/completion_wrapper.py +38 -0
- autocoder/sdk/cli/formatters.py +211 -0
- autocoder/sdk/cli/handlers.py +175 -0
- autocoder/sdk/cli/install_completion.py +301 -0
- autocoder/sdk/cli/main.py +286 -0
- autocoder/sdk/cli/options.py +73 -0
- autocoder/sdk/constants.py +102 -0
- autocoder/sdk/core/__init__.py +20 -0
- autocoder/sdk/core/auto_coder_core.py +880 -0
- autocoder/sdk/core/bridge.py +500 -0
- autocoder/sdk/example.py +0 -0
- autocoder/sdk/exceptions.py +72 -0
- autocoder/sdk/models/__init__.py +19 -0
- autocoder/sdk/models/messages.py +209 -0
- autocoder/sdk/models/options.py +196 -0
- autocoder/sdk/models/responses.py +311 -0
- autocoder/sdk/session/__init__.py +32 -0
- autocoder/sdk/session/session.py +106 -0
- autocoder/sdk/session/session_manager.py +56 -0
- autocoder/sdk/utils/__init__.py +24 -0
- autocoder/sdk/utils/formatters.py +216 -0
- autocoder/sdk/utils/io_utils.py +302 -0
- autocoder/sdk/utils/validators.py +287 -0
- autocoder/version.py +2 -1
- auto_coder-0.1.398.dist-info/METADATA +0 -111
- autocoder/common/conversations/compatibility.py +0 -303
- autocoder/common/conversations/conversation_manager.py +0 -502
- autocoder/common/conversations/example.py +0 -152
- {auto_coder-0.1.398.dist-info → auto_coder-0.1.400.dist-info/licenses}/LICENSE +0 -0
- {auto_coder-0.1.398.dist-info → auto_coder-0.1.400.dist-info}/top_level.txt +0 -0
autocoder/agent/entry_command_agent/__init__.py
@@ -0,0 +1,29 @@
+"""
+Entry Command Agent module
+
+This module contains the agent implementations for the various entry commands, used to handle different kinds of user commands.
+
+Agents currently included:
+- ChatAgent: agent that handles chat commands
+- ProjectReaderAgent: agent that handles project-reading commands
+- Voice2TextAgent: agent that handles voice-to-text commands
+- GenerateCommandAgent: agent that handles command generation
+- AutoToolAgent: agent that handles auto-tool commands
+- DesignerAgent: agent that handles design commands
+"""
+
+from .chat import ChatAgent
+from .project_reader import ProjectReaderAgent
+from .voice2text import Voice2TextAgent
+from .generate_command import GenerateCommandAgent
+from .auto_tool import AutoToolAgent
+from .designer import DesignerAgent
+
+__all__ = [
+    'ChatAgent',
+    'ProjectReaderAgent',
+    'Voice2TextAgent',
+    'GenerateCommandAgent',
+    'AutoToolAgent',
+    'DesignerAgent'
+]
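
The re-exports above let callers construct any entry-command agent straight from the package. Below is a hedged sketch (not from the diff) of how a command name might be dispatched to one of these agents; the agent classes and their (args, llm, raw_args) constructor signature come from the code above, while the mapping and the make_agent helper are illustrative, and the real dispatch presumably lives in the reworked auto_coder_runner.py.

# Hedged sketch: dispatching a command name to an entry-command agent.
# `args`, `llm`, and `raw_args` stand in for the argument object, LLM
# client, and raw CLI namespace that auto-coder normally supplies.
from autocoder.agent.entry_command_agent import (
    ChatAgent,
    AutoToolAgent,
    DesignerAgent,
)

def make_agent(command: str, args, llm, raw_args):
    # Illustrative mapping only; not the package's actual dispatch table.
    agents = {
        "chat": ChatAgent,
        "auto_tool": AutoToolAgent,
        "designer": DesignerAgent,
    }
    return agents[command](args, llm, raw_args)

# Usage (with pre-built args/llm/raw_args):
# make_agent("chat", args, llm, raw_args).run()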
autocoder/agent/entry_command_agent/auto_tool.py
@@ -0,0 +1,61 @@
+
+
+
+
+
+from rich.console import Console
+from rich.panel import Panel
+from rich.markdown import Markdown
+from rich.live import Live
+
+from autocoder.utils.request_queue import (
+    request_queue,
+    RequestValue,
+    DefaultValue,
+    RequestOption,
+)
+
+
+class AutoToolAgent:
+    def __init__(self, args, llm, raw_args):
+        self.args = args
+        self.llm = llm
+        self.raw_args = raw_args
+        self.console = Console()
+
+    def run(self):
+        """Main logic for the auto_tool command"""
+        from autocoder.agent.auto_tool import AutoTool
+
+        auto_tool = AutoTool(self.args, self.llm)
+        v = auto_tool.run(self.args.query)
+
+        if self.args.request_id:
+            request_queue.add_request(
+                self.args.request_id,
+                RequestValue(
+                    value=DefaultValue(value=v), status=RequestOption.COMPLETED
+                ),
+            )
+
+        markdown_content = v
+
+        with Live(
+            Panel("", title="Response", border_style="green", expand=False),
+            refresh_per_second=4,
+            auto_refresh=True,
+            vertical_overflow="visible",
+            console=Console(force_terminal=True, color_system="auto", height=None)
+        ) as live:
+            live.update(
+                Panel(
+                    Markdown(markdown_content),
+                    title="Response",
+                    border_style="green",
+                    expand=False,
+                )
+            )
+
+
+
+
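
AutoToolAgent renders its final answer through rich's Live display rather than a plain print. The standalone sketch below reproduces just that rendering pattern from run() so it can be tried outside auto-coder; only the rich library is required, and the markdown body is invented.

# Standalone sketch of the rendering pattern used in AutoToolAgent.run():
# a Panel-wrapped Markdown body painted through rich's Live display.
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown
from rich.panel import Panel

markdown_content = "# Result\n\n- the tool finished without errors"  # invented

with Live(
    Panel("", title="Response", border_style="green", expand=False),
    refresh_per_second=4,
    console=Console(),
) as live:
    # Replace the empty placeholder panel with the rendered markdown.
    live.update(
        Panel(Markdown(markdown_content), title="Response",
              border_style="green", expand=False)
    )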
autocoder/agent/entry_command_agent/chat.py
@@ -0,0 +1,475 @@
+
+import os
+import time
+import json
+from typing import Dict, Any, List
+from rich.console import Console
+from rich.panel import Panel
+from rich.markdown import Markdown
+from rich.live import Live
+from prompt_toolkit import prompt
+from prompt_toolkit.formatted_text import FormattedText
+from loguru import logger
+import byzerllm
+
+from autocoder.common.auto_coder_lang import get_message
+from autocoder.common.memory_manager import save_to_memory_file
+from autocoder.common.utils_code_auto_generate import stream_chat_with_continue
+from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
+from autocoder.common.printer import Printer
+from autocoder.rag.token_counter import count_tokens
+from autocoder.privacy.model_filter import ModelPathFilter
+from autocoder.common.result_manager import ResultManager
+from autocoder.events.event_manager_singleton import get_event_manager
+from autocoder.events import event_content as EventContentCreator
+from autocoder.events.event_types import EventMetadata
+from autocoder.common.mcp_server import get_mcp_server
+from autocoder.common.mcp_server_types import McpRequest
+from autocoder.utils.llms import get_llm_names
+from autocoder.utils.request_queue import (
+    request_queue,
+    RequestValue,
+    DefaultValue,
+    RequestOption,
+)
+from autocoder.run_context import get_run_context, RunMode
+from autocoder.common.action_yml_file_manager import ActionYmlFileManager
+
+
+class ChatAgent:
+    def __init__(self, args, llm, raw_args):
+        self.args = args
+        self.llm = llm
+        self.raw_args = raw_args
+        self.console = Console()
+        self.result_manager = ResultManager()
+
+    def run(self):
+        """Main logic for the chat command"""
+        # Normalized format:
+        # {"command1": {"args": ["arg1", "arg2"], "kwargs": {"key1": "value1", "key2": "value2"}}}
+        if isinstance(self.args.action, dict):
+            commands_info = self.args.action
+        else:
+            commands_info = {}
+            for command in self.args.action:
+                commands_info[command] = {}
+
+        memory_dir = os.path.join(self.args.source_dir, ".auto-coder", "memory")
+        os.makedirs(memory_dir, exist_ok=True)
+        memory_file = os.path.join(memory_dir, "chat_history.json")
+
+        # Handle a new session
+        if self.args.new_session:
+            self._handle_new_session(memory_file)
+            if not self.args.query or (self.args.query_prefix and self.args.query == self.args.query_prefix) or (self.args.query_suffix and self.args.query == self.args.query_suffix):
+                return
+
+        # Load chat history
+        chat_history = self._load_chat_history(memory_file)
+        chat_history["ask_conversation"].append(
+            {"role": "user", "content": self.args.query}
+        )
+
+        # Get the chat model
+        if self.llm.get_sub_client("chat_model"):
+            chat_llm = self.llm.get_sub_client("chat_model")
+        else:
+            chat_llm = self.llm
+
+        # Build the conversation context
+        loaded_conversations = self._build_conversations(commands_info, chat_history)
+
+        # Handle human-as-model mode
+        if get_run_context().mode != RunMode.WEB and self.args.human_as_model:
+            return self._handle_human_as_model(loaded_conversations, chat_history, memory_file, commands_info)
+
+        # Track elapsed time
+        start_time = time.time()
+        commit_file_name = None
+
+        # Produce the response according to the command type
+        v = self._get_response(commands_info, loaded_conversations, chat_llm)
+        if isinstance(v, tuple):
+            v, commit_file_name = v
+
+        # Stream out the response
+        model_name = ",".join(get_llm_names(chat_llm))
+        assistant_response, last_meta = stream_out(
+            v,
+            request_id=self.args.request_id,
+            console=self.console,
+            model_name=model_name,
+            args=self.args
+        )
+
+        self.result_manager.append(content=assistant_response, meta={
+            "action": "chat",
+            "input": {
+                "query": self.args.query
+            }
+        })
+
+        # Special handling for the learn command
+        if "learn" in commands_info and commit_file_name:
+            self._handle_learn_command(commit_file_name, assistant_response)
+
+        # Print statistics
+        if last_meta:
+            self._print_stats(last_meta, start_time, model_name)
+
+        # Update chat history
+        chat_history["ask_conversation"].append(
+            {"role": "assistant", "content": assistant_response}
+        )
+
+        with open(memory_file, "w", encoding="utf-8") as f:
+            json.dump(chat_history, f, ensure_ascii=False)
+
+        # Handle follow-up commands
+        self._handle_post_commands(commands_info, assistant_response, chat_history)
+
+    def _handle_new_session(self, memory_file):
+        """Handle new-session logic"""
+        if os.path.exists(memory_file):
+            with open(memory_file, "r", encoding="utf-8") as f:
+                old_chat_history = json.load(f)
+            if "conversation_history" not in old_chat_history:
+                old_chat_history["conversation_history"] = []
+            old_chat_history["conversation_history"].append(
+                old_chat_history.get("ask_conversation", []))
+            chat_history = {"ask_conversation": [],
+                            "conversation_history": old_chat_history["conversation_history"]}
+        else:
+            chat_history = {"ask_conversation": [],
+                            "conversation_history": []}
+        with open(memory_file, "w", encoding="utf-8") as f:
+            json.dump(chat_history, f, ensure_ascii=False)
+
+        self.result_manager.add_result(content=get_message("new_session_started"), meta={
+            "action": "chat",
+            "input": {
+                "query": self.args.query
+            }
+        })
+        self.console.print(
+            Panel(
+                get_message("new_session_started"),
+                title="Session Status",
+                expand=False,
+                border_style="green",
+            )
+        )
+
+    def _load_chat_history(self, memory_file):
+        """Load chat history"""
+        if os.path.exists(memory_file):
+            with open(memory_file, "r", encoding="utf-8") as f:
+                chat_history = json.load(f)
+            if "conversation_history" not in chat_history:
+                chat_history["conversation_history"] = []
+        else:
+            chat_history = {"ask_conversation": [],
+                            "conversation_history": []}
+        return chat_history
+
+    def _build_conversations(self, commands_info, chat_history):
+        """Build the conversation context"""
+        source_count = 0
+        pre_conversations = []
+        context_content = self.args.context if self.args.context else ""
+
+        if self.args.context:
+            try:
+                context = json.loads(self.args.context)
+                if "file_content" in context:
+                    context_content = context["file_content"]
+            except:
+                pass
+
+            pre_conversations.append(
+                {
+                    "role": "user",
+                    "content": f"请阅读下面的代码和文档:\n\n <files>\n{context_content}\n</files>",
+                },
+            )
+            pre_conversations.append(
+                {"role": "assistant", "content": "read"})
+            source_count += 1
+
+        # Build the index and filter files
+        if "no_context" not in commands_info:
+            from autocoder.index.index import IndexManager
+            from autocoder.index.entry import build_index_and_filter_files
+            from autocoder.pyproject import PyProject
+            from autocoder.tsproject import TSProject
+            from autocoder.suffixproject import SuffixProject
+
+            if self.args.project_type == "ts":
+                pp = TSProject(args=self.args, llm=self.llm)
+            elif self.args.project_type == "py":
+                pp = PyProject(args=self.args, llm=self.llm)
+            else:
+                pp = SuffixProject(args=self.args, llm=self.llm, file_filter=None)
+            pp.run()
+            sources = pp.sources
+
+            # Apply the model path filter
+            chat_llm = self.llm.get_sub_client("chat_model") or self.llm
+            model_filter = ModelPathFilter.from_model_object(chat_llm, self.args)
+            filtered_sources = []
+            printer = Printer()
+            for source in sources:
+                if model_filter.is_accessible(source.module_name):
+                    filtered_sources.append(source)
+                else:
+                    printer.print_in_terminal("index_file_filtered",
+                                              style="yellow",
+                                              file_path=source.module_name,
+                                              model_name=",".join(get_llm_names(chat_llm)))
+
+            s = build_index_and_filter_files(
+                llm=self.llm, args=self.args, sources=filtered_sources).to_str()
+
+            if s:
+                pre_conversations.append(
+                    {
+                        "role": "user",
+                        "content": f"请阅读下面的代码和文档:\n\n <files>\n{s}\n</files>",
+                    }
+                )
+                pre_conversations.append(
+                    {"role": "assistant", "content": "read"})
+                source_count += 1
+
+        loaded_conversations = pre_conversations + chat_history["ask_conversation"]
+        return loaded_conversations
+
+    def _handle_human_as_model(self, loaded_conversations, chat_history, memory_file, commands_info):
+        """Handle human-as-model mode"""
+        @byzerllm.prompt()
+        def chat_with_human_as_model(
+            source_codes, pre_conversations, last_conversation
+        ):
+            """
+            {% if source_codes %}
+            {{ source_codes }}
+            {% endif %}
+
+            {% if pre_conversations %}
+            下面是我们之间的历史对话,假设我是A,你是B。
+            <conversations>
+            {% for conv in pre_conversations %}
+            {{ "A" if conv.role == "user" else "B" }}: {{ conv.content }}
+            {% endfor %}
+            </conversations>
+            {% endif %}
+
+
+            参考上面的文件以及历史对话,回答用户的问题。
+            用户的问题: {{ last_conversation.content }}
+            """
+
+        source_count = 0
+        if self.args.context:
+            source_count += 1
+        if "no_context" not in commands_info:
+            source_count += 1
+
+        source_codes_conversations = loaded_conversations[0: source_count * 2]
+        source_codes = ""
+        for conv in source_codes_conversations:
+            if conv["role"] == "user":
+                source_codes += conv["content"]
+
+        chat_content = chat_with_human_as_model.prompt(
+            source_codes=source_codes,
+            pre_conversations=loaded_conversations[source_count * 2: -1],
+            last_conversation=loaded_conversations[-1],
+        )
+
+        with open(self.args.target_file, "w", encoding="utf-8") as f:
+            f.write(chat_content)
+
+        try:
+            import pyperclip
+            pyperclip.copy(chat_content)
+            self.console.print(
+                Panel(
+                    get_message("chat_human_as_model_instructions"),
+                    title="Instructions",
+                    border_style="blue",
+                    expand=False,
+                )
+            )
+        except Exception:
+            logger.warning(get_message("clipboard_not_supported"))
+            self.console.print(
+                Panel(
+                    get_message("human_as_model_instructions_no_clipboard"),
+                    title="Instructions",
+                    border_style="blue",
+                    expand=False,
+                )
+            )
+
+        lines = []
+        while True:
+            line = prompt(FormattedText([("#00FF00", "> ")]), multiline=False)
+            line_lower = line.strip().lower()
+            if line_lower in ["eof", "/eof"]:
+                break
+            elif line_lower in ["/clear"]:
+                lines = []
+                print("\033[2J\033[H")  # Clear terminal screen
+                continue
+            elif line_lower in ["/break"]:
+                raise Exception("User requested to break the operation.")
+            lines.append(line)
+
+        result = "\n".join(lines)
+
+        self.result_manager.append(content=result,
+                                   meta={"action": "chat", "input": {
+                                       "query": self.args.query
+                                   }})
+
+        # Update chat history with user's response
+        chat_history["ask_conversation"].append(
+            {"role": "assistant", "content": result}
+        )
+
+        with open(memory_file, "w", encoding="utf-8") as f:
+            json.dump(chat_history, f, ensure_ascii=False)
+
+        if "save" in commands_info:
+            save_to_memory_file(ask_conversation=chat_history["ask_conversation"],
+                                query=self.args.query,
+                                response=result)
+            printer = Printer()
+            printer.print_in_terminal("memory_save_success")
+        return {}
+
+    def _get_response(self, commands_info, loaded_conversations, chat_llm):
+        """Get the response according to the command type"""
+        if "rag" in commands_info:
+            from autocoder.rag.rag_entry import RAGFactory
+            self.args.enable_rag_search = True
+            self.args.enable_rag_context = False
+            rag = RAGFactory.get_rag(llm=chat_llm, args=self.args, path="")
+            response = rag.stream_chat_oai(conversations=loaded_conversations)[0]
+            return (item for item in response)
+
+        elif "mcp" in commands_info:
+            mcp_server = get_mcp_server()
+            pos_args = commands_info["mcp"].get("args", [])
+            final_query = pos_args[0] if pos_args else self.args.query
+            response = mcp_server.send_request(
+                McpRequest(
+                    query=final_query,
+                    model=self.args.inference_model or self.args.model,
+                    product_mode=self.args.product_mode
+                )
+            )
+            return [[response.result, None]]
+
+        elif "review" in commands_info:
+            from autocoder.agent.auto_review_commit import AutoReviewCommit
+            reviewer = AutoReviewCommit(llm=chat_llm, args=self.args)
+            pos_args = commands_info["review"].get("args", [])
+            final_query = pos_args[0] if pos_args else self.args.query
+            kwargs = commands_info["review"].get("kwargs", {})
+            commit_id = kwargs.get("commit", None)
+            return reviewer.review_commit(query=final_query, conversations=loaded_conversations, commit_id=commit_id)
+
+        elif "learn" in commands_info:
+            from autocoder.agent.auto_learn_from_commit import AutoLearnFromCommit
+            learner = AutoLearnFromCommit(llm=chat_llm, args=self.args)
+            pos_args = commands_info["learn"].get("args", [])
+            final_query = pos_args[0] if pos_args else self.args.query
+            return learner.learn_from_commit(query=final_query, conversations=loaded_conversations)
+
+        else:
+            # Estimate the input token count
+            dumped_conversations = json.dumps(loaded_conversations, ensure_ascii=False)
+            estimated_input_tokens = count_tokens(dumped_conversations)
+            printer = Printer()
+            printer.print_in_terminal("estimated_chat_input_tokens", style="yellow",
+                                      estimated_input_tokens=estimated_input_tokens)
+
+            return stream_chat_with_continue(
+                llm=chat_llm,
+                conversations=loaded_conversations,
+                llm_config={},
+                args=self.args
+            )
+
+    def _handle_learn_command(self, commit_file_name, assistant_response):
+        """Special handling for the learn command"""
+        if commit_file_name:
+            # Use ActionYmlFileManager to update the YAML file
+            action_manager = ActionYmlFileManager(self.args.source_dir)
+            if not action_manager.update_yaml_field(commit_file_name, 'how_to_reproduce', assistant_response):
+                printer = Printer()
+                printer.print_in_terminal("yaml_save_error", style="red", yaml_file=commit_file_name)
+
+    def _print_stats(self, last_meta, start_time, model_name):
+        """Print statistics"""
+        elapsed_time = time.time() - start_time
+        printer = Printer()
+        speed = last_meta.generated_tokens_count / elapsed_time
+
+        # Get model info for pricing
+        from autocoder.utils import llms as llm_utils
+        model_info = llm_utils.get_model_info(model_name, self.args.product_mode) or {}
+        input_price = model_info.get("input_price", 0.0) if model_info else 0.0
+        output_price = model_info.get("output_price", 0.0) if model_info else 0.0
+
+        # Calculate costs (prices are quoted per million tokens)
+        input_cost = (last_meta.input_tokens_count * input_price) / 1000000
+        output_cost = (last_meta.generated_tokens_count * output_price) / 1000000
+
+        printer.print_in_terminal("stream_out_stats",
+                                  model_name=model_name,
+                                  elapsed_time=elapsed_time,
+                                  first_token_time=last_meta.first_token_time,
+                                  input_tokens=last_meta.input_tokens_count,
+                                  output_tokens=last_meta.generated_tokens_count,
+                                  input_cost=round(input_cost, 4),
+                                  output_cost=round(output_cost, 4),
+                                  speed=round(speed, 2))
+
+        get_event_manager(self.args.event_file).write_result(
+            EventContentCreator.create_result(content=EventContentCreator.ResultTokenStatContent(
+                model_name=model_name,
+                elapsed_time=elapsed_time,
+                input_tokens=last_meta.input_tokens_count,
+                output_tokens=last_meta.generated_tokens_count,
+                input_cost=round(input_cost, 4),
+                output_cost=round(output_cost, 4),
+                speed=round(speed, 2)
+            )).to_dict(), metadata=EventMetadata(
+                action_file=self.args.file
+            ).to_dict())
+
+    def _handle_post_commands(self, commands_info, assistant_response, chat_history):
+        """Handle follow-up commands"""
+        if "copy" in commands_info:
+            # Copy assistant_response to the clipboard
+            import pyperclip
+            try:
+                pyperclip.copy(assistant_response)
+            except:
+                print("pyperclip not installed or clipboard is not supported, instruction will not be copied to clipboard.")
+
+        if "save" in commands_info:
+            tmp_dir = save_to_memory_file(ask_conversation=chat_history["ask_conversation"],
+                                          query=self.args.query,
+                                          response=assistant_response)
+            printer = Printer()
+            printer.print_in_terminal("memory_save_success", style="green", path=tmp_dir)
+
+            if len(commands_info["save"]["args"]) > 0:
+                # Save to the file specified by the user
+                with open(commands_info["save"]["args"][0], "w", encoding="utf-8") as f:
+                    f.write(assistant_response)
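
ChatAgent persists its state in .auto-coder/memory/chat_history.json with two keys: "ask_conversation" holds the active session, and "conversation_history" accumulates earlier sessions whenever a new one is started. A small sketch of that file shape, with invented example messages:

# Sketch of the chat_history.json layout that ChatAgent reads and writes.
# "/new" archives the old ask_conversation list into conversation_history.
# The example messages are invented.
import json

chat_history = {
    "ask_conversation": [
        {"role": "user", "content": "explain this repo"},
        {"role": "assistant", "content": "..."},
    ],
    "conversation_history": [],  # list of archived ask_conversation lists
}

with open("chat_history.json", "w", encoding="utf-8") as f:
    json.dump(chat_history, f, ensure_ascii=False)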
autocoder/agent/entry_command_agent/designer.py
@@ -0,0 +1,53 @@
+
+
+
+
+
+
+
+from autocoder.utils.request_queue import (
+    request_queue,
+    RequestValue,
+    DefaultValue,
+    RequestOption,
+)
+
+
+class DesignerAgent:
+    def __init__(self, args, llm, raw_args):
+        self.args = args
+        self.llm = llm
+        self.raw_args = raw_args
+
+    def run(self):
+        """Main logic for the designer command"""
+        from autocoder.agent.designer import SVGDesigner, SDDesigner, LogoDesigner
+
+        if self.args.agent_designer_mode == "svg":
+            designer = SVGDesigner(self.args, self.llm)
+            designer.run(self.args.query)
+            print("Successfully generated image in output.png")
+        elif self.args.agent_designer_mode == "sd":
+            designer = SDDesigner(self.args, self.llm)
+            designer.run(self.args.query)
+            print("Successfully generated image in output.jpg")
+        elif self.args.agent_designer_mode.startswith("logo"):
+            designer = LogoDesigner(self.args, self.llm)
+            designer.run(self.args.query)
+            print("Successfully generated image in output.png")
+
+        if self.args.request_id:
+            request_queue.add_request(
+                self.args.request_id,
+                RequestValue(
+                    value=DefaultValue(
+                        value="Successfully generated image"),
+                    status=RequestOption.COMPLETED,
+                ),
+            )
+
+
+
+
+
+
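
The same completion handshake closes out several of these agents: when a request_id is present, the final value is pushed onto the shared request_queue so whatever issued the request (presumably the web mode) can collect it. Below is a sketch of that pattern factored into a helper; the imports and the RequestValue(value=..., status=...) shape are taken from the diff, while the complete() helper name is ours, not auto-coder's.

# Sketch of the completion handshake shared by AutoToolAgent,
# DesignerAgent, and GenerateCommandAgent. complete() is illustrative.
from autocoder.utils.request_queue import (
    request_queue,
    RequestValue,
    DefaultValue,
    RequestOption,
)

def complete(request_id: str, value: str) -> None:
    # Park the final value on the shared queue, keyed by request_id,
    # so the waiting caller can retrieve it.
    if request_id:
        request_queue.add_request(
            request_id,
            RequestValue(value=DefaultValue(value=value),
                         status=RequestOption.COMPLETED),
        )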
autocoder/agent/entry_command_agent/generate_command.py
@@ -0,0 +1,50 @@
+
+
+
+
+import os
+from rich.console import Console
+from rich.panel import Panel
+
+from autocoder.utils.request_queue import (
+    request_queue,
+    RequestValue,
+    DefaultValue,
+    RequestOption,
+)
+
+
+class GenerateCommandAgent:
+    def __init__(self, args, llm, raw_args):
+        self.args = args
+        self.llm = llm
+        self.raw_args = raw_args
+        self.console = Console()
+
+    def run(self):
+        """Main logic for the generate_command command"""
+        from autocoder.common.command_generator import generate_shell_script
+
+        shell_script = generate_shell_script(self.args, self.llm)
+
+        self.console.print(
+            Panel(
+                shell_script,
+                title="Shell Script",
+                border_style="magenta",
+            )
+        )
+
+        with open(os.path.join(".auto-coder", "exchange.txt"), "w", encoding="utf-8") as f:
+            f.write(shell_script)
+
+        request_queue.add_request(
+            self.args.request_id,
+            RequestValue(
+                value=DefaultValue(value=shell_script),
+                status=RequestOption.COMPLETED,
+            ),
+        )
+
+
+