auto-coder 0.1.259__py3-none-any.whl → 0.1.261__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder might be problematic.
- {auto_coder-0.1.259.dist-info → auto_coder-0.1.261.dist-info}/METADATA +1 -1
- {auto_coder-0.1.259.dist-info → auto_coder-0.1.261.dist-info}/RECORD +36 -27
- autocoder/agent/auto_review_commit.py +51 -24
- autocoder/auto_coder.py +24 -1
- autocoder/chat_auto_coder.py +377 -399
- autocoder/chat_auto_coder_lang.py +20 -0
- autocoder/commands/__init__.py +0 -0
- autocoder/commands/auto_command.py +1174 -0
- autocoder/commands/tools.py +533 -0
- autocoder/common/__init__.py +8 -0
- autocoder/common/auto_coder_lang.py +61 -8
- autocoder/common/auto_configure.py +304 -0
- autocoder/common/code_auto_merge.py +2 -2
- autocoder/common/code_auto_merge_diff.py +2 -2
- autocoder/common/code_auto_merge_editblock.py +2 -2
- autocoder/common/code_auto_merge_strict_diff.py +2 -2
- autocoder/common/code_modification_ranker.py +8 -7
- autocoder/common/command_completer.py +557 -0
- autocoder/common/conf_validator.py +245 -0
- autocoder/common/conversation_pruner.py +131 -0
- autocoder/common/git_utils.py +82 -1
- autocoder/common/index_import_export.py +101 -0
- autocoder/common/result_manager.py +115 -0
- autocoder/common/shells.py +22 -6
- autocoder/common/utils_code_auto_generate.py +2 -2
- autocoder/dispacher/actions/action.py +45 -4
- autocoder/dispacher/actions/plugins/action_regex_project.py +13 -1
- autocoder/index/filter/quick_filter.py +22 -7
- autocoder/utils/auto_coder_utils/chat_stream_out.py +13 -6
- autocoder/utils/project_structure.py +15 -0
- autocoder/utils/thread_utils.py +4 -0
- autocoder/version.py +1 -1
- {auto_coder-0.1.259.dist-info → auto_coder-0.1.261.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.259.dist-info → auto_coder-0.1.261.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.259.dist-info → auto_coder-0.1.261.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.259.dist-info → auto_coder-0.1.261.dist-info}/top_level.txt +0 -0
autocoder/common/auto_coder_lang.py

@@ -3,7 +3,26 @@ from byzerllm.utils import format_str_jinja2
 
 MESSAGES = {
     "en": {
+        "config_validation_error": "Config validation error: {{error}}",
+        "invalid_boolean_value": "Value '{{value}}' is not a valid boolean(true/false)",
+        "invalid_integer_value": "Value '{{value}}' is not a valid integer",
+        "invalid_float_value": "Value '{{value}}' is not a valid float",
+        "invalid_type_value": "Value '{{value}}' is not a valid type (expected: {{types}})",
+        "value_out_of_range": "Value {{value}} is out of allowed range({{min}}~{{max}})",
+        "invalid_choice": "Value '{{value}}' is not in allowed options({{allowed}})",
+        "unknown_config_key": "Unknown config key '{{key}}'",
+        "model_not_found": "Model '{{model}}' is not configured in models.yml",
+        "required_without_default": "Config key '{{key}}' requires explicit value",
+        "auto_command_action_break": "Command {{command}} execution failed (got {{action}} result), no result can be obtained, please try again",
+        "auto_command_break": "Auto command execution failed to execute command: {{command}}",
+        "auto_command_executing": "\n\n============= Executing command: {{command}} =============\n\n",
         "model_provider_select_title": "Select Model Provider",
+        "auto_config_analyzing": "Analyzing configuration...",
+        "config_delete_success": "Successfully deleted configuration: {{key}}",
+        "config_not_found": "Configuration not found: {{key}}",
+        "config_invalid_format": "Invalid configuration format. Expected 'key:value'",
+        "config_value_empty": "Configuration value cannot be empty",
+        "config_set_success": "Successfully set configuration: {{key}} = {{value}}",
         "model_provider_select_text": "Please select your model provider:",
         "model_provider_volcano": "Volcano Engine",
         "model_provider_siliconflow": "SiliconFlow AI",
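Every new entry is a Jinja2 template, and the hunk context shows the module already imports format_str_jinja2 from byzerllm. A minimal sketch of how a table like this is typically rendered; get_message is an illustrative helper and the exact format_str_jinja2 signature is an assumption, not necessarily the accessor this module really exposes:

```python
from byzerllm.utils import format_str_jinja2

# Trimmed copy of the table above, for illustration only.
MESSAGES = {
    "en": {"config_delete_success": "Successfully deleted configuration: {{key}}"},
    "zh": {"config_delete_success": "成功删除配置: {{key}}"},
}

def get_message(key: str, lang: str = "en", **params) -> str:
    # Fall back to English when the requested language lacks the key.
    table = MESSAGES.get(lang, MESSAGES["en"])
    template = table.get(key, MESSAGES["en"].get(key, key))
    return format_str_jinja2(template, **params)

print(get_message("config_delete_success", lang="zh", key="auto_merge"))
# -> 成功删除配置: auto_merge
```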
@@ -71,7 +90,7 @@ MESSAGES = {
             "Paste the answer to the input box below, use '/break' to exit, '/clear' to clear the screen, '/eof' to submit."
         ),
         "code_generation_start": "Auto generate the code...",
-        "code_generation_complete": "{{ model_names}} Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}, speed: {{ speed }} tokens/s",
+        "code_generation_complete": "{{ model_names}} Code generation completed in {{ duration }} seconds (sampling_count: {{ sampling_count }}), input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}, speed: {{ speed }} tokens/s",
         "code_merge_start": "Auto merge the code...",
         "code_execution_warning": "Content(send to model) is {{ content_length }} tokens (you may collect too much files), which is larger than the maximum input length {{ max_length }}",
         "quick_filter_start": "{{ model_name }} Starting filter context(quick_filter)...",
@@ -89,12 +108,12 @@ MESSAGES = {
         "ranking_start": "Start ranking {{ count }} candidates using model {{ model_name }}",
         "ranking_failed_request": "Ranking request failed: {{ error }}",
         "ranking_all_failed": "All ranking requests failed",
-        "ranking_complete": "{{ model_names }} Ranking completed in {{ elapsed }}s, total voters: {{ total_tasks }}, best candidate index: {{ best_candidate }}, scores: {{ scores }}, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}",
+        "ranking_complete": "{{ model_names }} Ranking completed in {{ elapsed }}s, total voters: {{ total_tasks }}, best candidate index: {{ best_candidate }}, scores: {{ scores }}, input_tokens: {{ input_tokens }}, output_tokens: {{ output_tokens }}, input_cost: {{ input_cost }}, output_cost: {{ output_cost }}, speed: {{ speed }} tokens/s",
         "ranking_process_failed": "Ranking process failed: {{ error }}",
         "ranking_failed": "Ranking failed in {{ elapsed }}s, using original order",
         "begin_index_source_code": "🚀 Begin to index source code in {{ source_dir }}",
         "stream_out_stats": "Model: {{ model_name }}, Total time: {{ elapsed_time }} seconds, First token time: {{ first_token_time }} seconds, Speed: {{ speed }} tokens/s, Input tokens: {{ input_tokens }}, Output tokens: {{ output_tokens }}, Input cost: {{ input_cost }}, Output cost: {{ output_cost }}",
-        "quick_filter_stats": "{{ model_names }}
+        "quick_filter_stats": "{{ model_names }} Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}, input cost: {{ input_cost }}, output cost: {{ output_cost }} speed: {{ speed }} tokens/s",
         "upsert_file": "✅ Updated file: {{ file_path }}",
         "unmerged_blocks_title": "Unmerged Blocks",
         "quick_filter_title": "{{ model_name }} is analyzing how to filter context...",
@@ -110,9 +129,36 @@ MESSAGES = {
         "estimated_chat_input_tokens": "Estimated chat input tokens: {{ estimated_input_tokens }}",
         "estimated_input_tokens_in_generate": "Estimated input tokens in generate ({{ generate_mode }}): {{ estimated_input_tokens }}",
         "model_has_access_restrictions": "{{model_name}} has access restrictions, cannot use the current function",
-    },
+        "auto_command_not_found": "Auto command not found: {{command}}. Please check your input and try again.",
+        "auto_command_failed": "Auto command failed: {{error}}. Please check your input and try again.",
+        "command_execution_result": "{{action}} execution result",
+        "satisfied_prompt": "Requirements satisfied, no further action needed",
+        "auto_command_analyzed": "Selected command",
+        "invalid_enum_value": "Value '{{value}}' is not in allowed values ({{allowed}})",
+        "no_changes_made": "⚠️ no changes made, the reason may be that the text block generated by the coding function has a problem, so it cannot be merged into the project",
+        "conversation_pruning_start": "Conversation pruning started, total tokens: {{total_tokens}}, safe zone: {{safe_zone}}",
+    },
     "zh": {
+        "config_validation_error": "配置验证错误: {{error}}",
+        "invalid_boolean_value": "值 '{{value}}' 不是有效的布尔值(true/false)",
+        "invalid_integer_value": "值 '{{value}}' 不是有效的整数",
+        "invalid_float_value": "值 '{{value}}' 不是有效的浮点数",
+        "invalid_type_value": "值 '{{value}}' 不是有效的类型 (期望: {{types}})",
+        "value_out_of_range": "值 {value} 超出允许范围({min}~{max})",
+        "invalid_choice": "值 '{value}' 不在允许选项中({allowed})",
+        "unknown_config_key": "未知的配置项 '{key}'",
+        "model_not_found": "模型 '{model}' 未在 models.yml 中配置",
+        "required_without_default": "配置项 '{key}' 需要明确设置值",
+        "auto_command_action_break": "命令 {{command}} 执行失败(获取到了 {{action}} 的结果),无法获得任何结果,请重试",
+        "auto_command_break": "自动命令执行失败: {{command}}",
+        "auto_command_executing": "\n\n============= 正在执行指令: {{command}} =============\n\n",
         "model_provider_select_title": "选择模型供应商",
+        "auto_config_analyzing": "正在分析配置...",
+        "config_delete_success": "成功删除配置: {{key}}",
+        "config_not_found": "未找到配置: {{key}}",
+        "config_invalid_format": "配置格式无效,应为'key:value'格式",
+        "config_value_empty": "配置值不能为空",
+        "config_set_success": "成功设置配置: {{key}} = {{value}}",
         "model_provider_select_text": "请选择您的模型供应商:",
         "model_provider_volcano": "火山方舟",
         "model_provider_siliconflow": "硅基流动",
@@ -179,7 +225,7 @@ MESSAGES = {
             "将获得答案黏贴到下面的输入框,换行后,使用 '/break' 退出,'/clear' 清屏,'/eof' 提交。"
         ),
         "code_generation_start": "正在自动生成代码...",
-        "code_generation_complete": "{{ model_names}} 代码生成完成,耗时 {{ duration }}
+        "code_generation_complete": "{{ model_names}} 代码生成完成,耗时 {{ duration }} 秒 (采样数: {{ sampling_count }}), 输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}, 速度: {{ speed }} tokens/秒",
         "code_merge_start": "正在自动合并代码...",
         "code_execution_warning": "发送给模型的内容长度为 {{ content_length }} tokens(您可能收集了太多文件),超过了最大输入长度 {{ max_length }}",
         "quick_filter_start": "{{ model_name }} 开始查找上下文(quick_filter)...",
@@ -190,7 +236,7 @@ MESSAGES = {
         "unmerged_blocks_warning": "⚠️ 发现 {{ num_blocks }} 个未合并的代码块,更改将不会被应用。请手动检查后重试。",
         "pylint_file_check_failed": "⚠️ {{ file_path }} 的 Pylint 检查失败。更改未应用。错误: {{ error_message }}",
         "merge_success": "✅ 成功合并了 {{ num_files }} 个文件中的更改 {{ num_changes }}/{{ total_blocks }} 个代码块。",
-        "no_changes_made": "⚠️
+        "no_changes_made": "⚠️ 未对任何文件进行更改。这个原因可能是因为coding函数生成的文本块格式有问题,导致无法合并进项目",
         "unmerged_blocks_title": "未合并代码块",
         "unmerged_file_path": "文件: {{file_path}}",
         "unmerged_search_block": "Search Block({{similarity}}):",
@@ -208,16 +254,23 @@ MESSAGES = {
         "ranking_start": "开始对 {{ count }} 个候选项进行排序,使用模型 {{ model_name }} 打分",
         "ranking_failed_request": "排序请求失败: {{ error }}",
         "ranking_all_failed": "所有排序请求都失败",
-        "ranking_complete": "{{ model_names }} 排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }}
+        "ranking_complete": "{{ model_names }} 排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }},输入成本: {{ input_cost }}, 输出成本: {{ output_cost }},速度: {{ speed }} tokens/秒",
         "ranking_process_failed": "排序过程失败: {{ error }}",
         "ranking_failed": "排序失败,耗时 {{ elapsed }} 秒,使用原始顺序",
         "stream_out_stats": "模型: {{ model_name }},总耗时 {{ elapsed_time }} 秒,首token时间: {{ first_token_time }} 秒, 速度: {{ speed }} tokens/秒, 输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }}",
-        "quick_filter_stats": "{{ model_names }} Quick
+        "quick_filter_stats": "{{ model_names }} Quick Filter 完成耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 输入成本: {{ input_cost }}, 输出成本: {{ output_cost }} 速度: {{ speed }} tokens/秒",
         "quick_filter_title": "{{ model_name }} 正在分析如何筛选上下文...",
         "quick_filter_failed": "❌ 快速过滤器失败: {{ error }}. ",
         "estimated_chat_input_tokens": "对话输入token预估为: {{ estimated_input_tokens }}",
         "estimated_input_tokens_in_generate": "生成代码({{ generate_mode }})预计输入token数: {{ estimated_input_tokens_in_generate }}",
         "model_has_access_restrictions": "{{model_name}} 有访问限制,无法使用当前功能",
+        "auto_command_not_found": "未找到自动命令: {{command}}。请检查您的输入并重试。",
+        "auto_command_failed": "自动命令执行失败: {{error}}。请检查您的输入并重试。",
+        "command_execution_result": "{{action}} 执行结果",
+        "satisfied_prompt": "已满足需求,无需进一步操作",
+        "auto_command_analyzed": "被选择指令",
+        "invalid_enum_value": "值 '{{value}}' 不在允许的值列表中 ({{allowed}})",
+        "conversation_pruning_start": "⚠️ 对话长度 {{total_tokens}} tokens 超过安全阈值 {{safe_zone}},开始修剪对话。"
     }}
 
 
autocoder/common/auto_configure.py (new file)

@@ -0,0 +1,304 @@
+import json
+import logging
+import os
+import time
+import traceback
+import uuid
+from typing import Dict, Any, Optional, Union, Callable, List
+from pydantic import BaseModel, Field, SkipValidation
+import byzerllm
+from byzerllm import ByzerLLM
+from byzerllm.utils.client import code_utils
+from autocoder.common.printer import Printer
+from byzerllm.utils.str2model import to_model
+from autocoder.utils.auto_coder_utils.chat_stream_out import stream_out
+from autocoder.common.result_manager import ResultManager
+from autocoder.utils import llms as llms_utils
+logger = logging.getLogger(__name__)
+
+class ConfigMessage(BaseModel):
+    role: str
+    content: str
+
+class ExtenedConfigMessage(BaseModel):
+    message: ConfigMessage
+    timestamp: str
+
+class ConfigConversation(BaseModel):
+    history: Dict[str, ExtenedConfigMessage]
+    current_conversation: List[ConfigMessage]
+
+def save_to_memory_file(query: str, response: str):
+    """Save conversation to memory file using ConfigConversation structure"""
+    memory_dir = os.path.join(".auto-coder", "memory")
+    os.makedirs(memory_dir, exist_ok=True)
+    file_path = os.path.join(memory_dir, "config_chat_history.json")
+
+    # Create new message objects
+    user_msg = ConfigMessage(role="user", content=query)
+    assistant_msg = ConfigMessage(role="assistant", content=response)
+
+    extended_user_msg = ExtenedConfigMessage(
+        message=user_msg,
+        timestamp=str(int(time.time()))
+    )
+    extended_assistant_msg = ExtenedConfigMessage(
+        message=assistant_msg,
+        timestamp=str(int(time.time()))
+    )
+
+    # Load existing conversation or create new
+    if os.path.exists(file_path):
+        with open(file_path, "r", encoding="utf-8") as f:
+            try:
+                existing_conv = ConfigConversation.model_validate_json(f.read())
+            except Exception:
+                existing_conv = ConfigConversation(
+                    history={},
+                    current_conversation=[]
+                )
+    else:
+        existing_conv = ConfigConversation(
+            history={},
+            current_conversation=[]
+        )
+
+    existing_conv.current_conversation.append(extended_user_msg)
+    existing_conv.current_conversation.append(extended_assistant_msg)
+    # Save updated conversation
+    with open(file_path, "w", encoding="utf-8") as f:
+        f.write(existing_conv.model_dump_json(indent=2))
+
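save_to_memory_file appends one user/assistant pair per call and rewrites the whole JSON file under .auto-coder/memory/. One apparent mismatch worth noting: current_conversation is declared as List[ConfigMessage] but receives ExtenedConfigMessage instances. A sketch of a single call, assuming the function above is importable; the query and response strings are made up:

```python
# Record one tuning exchange; the directory and file are created on first use.
save_to_memory_file(
    query="set auto_merge to wholefile",
    response='{"configs": [{"config": {"auto_merge": "wholefile"}}], "reasoning": "..."}',
)
# -> .auto-coder/memory/config_chat_history.json now holds the appended
#    user/assistant pair, serialized via model_dump_json(indent=2).
```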
+class MemoryConfig(BaseModel):
+    """
+    A model to encapsulate memory configuration and operations.
+    """
+    memory: Dict[str, Any]
+    save_memory_func: SkipValidation[Callable]
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    def configure(self, conf: str, skip_print: bool = False) -> None:
+        """
+        Configure memory with the given key-value pair.
+        """
+        printer = Printer()
+        parts = conf.split(None, 1)
+        if len(parts) == 2 and parts[0] in ["/drop", "/unset", "/remove"]:
+            key = parts[1].strip()
+            if key in self.memory["conf"]:
+                del self.memory["conf"][key]
+                self.save_memory_func()
+                printer.print_in_terminal("config_delete_success", style="green", key=key)
+            else:
+                printer.print_in_terminal("config_not_found", style="yellow", key=key)
+        else:
+            parts = conf.split(":", 1)
+            if len(parts) != 2:
+                printer.print_in_terminal("config_invalid_format", style="red")
+                return
+            key, value = parts
+            key = key.strip()
+            value = value.strip()
+            if not value:
+                printer.print_in_terminal("config_value_empty", style="red")
+                return
+            self.memory["conf"][key] = value
+            self.save_memory_func()
+            if not skip_print:
+                printer.print_in_terminal("config_set_success", style="green", key=key, value=value)
+
+
+
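configure accepts either a removal command (/drop, /unset, /remove) or a key:value pair, and persists through the injected callback. A minimal sketch with an in-memory dict and a no-op save function standing in for the real persistence layer (Printer output goes straight to the terminal):

```python
state = {"conf": {}}
mc = MemoryConfig(memory=state, save_memory_func=lambda: None)

mc.configure("auto_merge:editblock")         # stores the value as a string
mc.configure("generate_times_same_model:3")
mc.configure("/drop auto_merge")             # removes the key again

print(state["conf"])  # {'generate_times_same_model': '3'}
```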
+class AutoConfigRequest(BaseModel):
+    query: str = Field(..., description="用户原始请求内容")
+
+
+class AutoConfigResponse(BaseModel):
+    configs: List[Dict[str, Any]] = Field(default_factory=list)
+    reasoning: str = ""
+
+class ConfigAutoTuner:
+    def __init__(self, llm: Union[byzerllm.ByzerLLM, byzerllm.SimpleByzerLLM], memory_config: MemoryConfig):
+        self.llm = llm
+        self.memory_config = memory_config
+
+
+    def configure(self, conf: str, skip_print: bool = False) -> None:
+        """
+        Delegate configuration to MemoryConfig instance.
+        """
+        self.memory_config.configure(conf, skip_print)
+
+
+    @byzerllm.prompt()
+    def config_readme(self) -> str:
+        """
+        # 配置项说明
+        ## auto_merge: 代码合并方式,可选值为editblock、diff、wholefile.
+        - editblock: 生成 SEARCH/REPLACE 块,然后根据 SEARCH块到对应的源码查找,如果相似度阈值大于 editblock_similarity, 那么则将
+        找到的代码块替换为 REPLACE 块。大部分情况都推荐使用 editblock。
+        - wholefile: 重新生成整个文件,然后替换原来的文件。对于重构场景,推荐使用 wholefile。
+        - diff: 生成标准 git diff 格式,适用于简单的代码修改。
+
+        ## editblock_similarity: editblock相似度阈值
+        - editblock相似度阈值,取值范围为0-1,默认值为0.9。如果设置的太低,虽然能合并进去,但是会引入错误。推荐不要修改该值。
+
+        ## generate_times_same_model: 相同模型生成次数
+        当进行生成代码时,大模型会对同一个需求生成多份代码,然后会使用 generate_rerank_model 模型对多份代码进行重排序,
+        然后选择得分最高的代码。一般次数越多,最终得到正确的代码概率越高。默认值为1,推荐设置为3。但是设置值越多,可能速度就越慢,消耗的token也越多。
+
+        ## skip_filter_index: 是否跳过索引过滤
+        是否跳过根据用户的query 自动查找上下文。推荐设置为 false
+
+        ## skip_build_index: 是否跳过索引构建
+        是否自动构建索引。推荐设置为 false。注意,如果该值设置为 true, 那么 skip_filter_index 设置不会生效。
+
+        ## rank_times_same_model: 相同模型重排序次数
+        默认值为1. 如果 generate_times_same_model 参数设置大于1,那么 coding 函数会自动对多份代码进行重排序。
+        rank_times_same_model 表示重拍的次数,次数越多,选择到最好的代码的可能性越高,但是也会显著增加消耗的token和时间。
+        建议保持默认,要修改也建议不要超过3。
+        """
+
+    def command_readme(self) -> str:
+        """
+        # 命令说明
+        ## /chat: 进入配置对话模式
+        ## /coding: 进入代码生成模式
+        """
+
+    @byzerllm.prompt()
+    def _generate_config_str(self, request: AutoConfigRequest) -> str:
+        """
+        配置项说明:
+        <config_readme>
+        {{ config_readme }}
+        </config_readme>
+
+        用户请求:
+        <query>
+        {{ query }}
+        </query>
+
+        当前配置:
+        <current_conf>
+        {{ current_conf }}
+        </current_conf>
+
+        上次执行情况:
+        <last_execution_stat>
+        {{ last_execution_stat }}
+        </last_execution_stat>
+
+        阅读配置说明,根据用户请求和当前配置以及上次执行情况,生成优化参数,严格使用以下JSON格式:
+
+        ```json
+        {
+            "configs": [{
+                "config": {
+                    "auto_merge": "editblock",
+                },
+                "reasoning": "配置变更原因",
+            }
+            ]
+        }
+        ```
+        """
+        return {
+            "query": request.query,
+            "current_conf": json.dumps(self.memory_config.memory["conf"], indent=2),
+            "last_execution_stat": "",
+            "config_readme": self.config_readme.prompt()
+        }
+
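_generate_config_str is a Chinese-language prompt: config_readme documents auto_merge, editblock_similarity, generate_times_same_model, skip_filter_index, skip_build_index and rank_times_same_model for the tuning model, and the template pins the completion to the JSON schema embedded above. tune (next) hands the raw completion to byzerllm's to_model, which extracts the JSON and validates it against AutoConfigResponse. One wrinkle: the template nests reasoning inside each config item, while AutoConfigResponse.reasoning is a top-level field, so per-item reasoning only survives inside the untyped configs dicts. A sketch of that parsing step in isolation, assuming to_model also accepts a bare JSON string:

```python
raw_completion = (
    '{"configs": [{"config": {"auto_merge": "editblock"}, '
    '"reasoning": "editblock merges are safest here"}]}'
)

response = to_model(raw_completion, AutoConfigResponse)
print(response.configs[0]["config"])  # {'auto_merge': 'editblock'}
print(repr(response.reasoning))       # '' -- top-level reasoning was not set
```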
+    def tune(self, request: AutoConfigRequest) -> 'AutoConfigResponse':
+        result_manager = ResultManager()
+        try:
+            # 获取 prompt 内容
+            prompt = self._generate_config_str.prompt(request)
+
+            # 构造对话上下文
+            conversations = [{"role": "user", "content": prompt}]
+
+            def extract_command_response(content):
+                # 提取 JSON 并转换为 AutoConfigResponse
+                try:
+                    response = to_model(content, AutoConfigResponse)
+                    return response.reasoning
+                except Exception as e:
+                    return content
+
+
+            # 使用 stream_out 进行输出
+            model_name = ",".join(llms_utils.get_llm_names(self.llm))
+            printer = Printer()
+            title = printer.get_message_from_key("auto_config_analyzing")
+            start_time = time.monotonic()
+            result, last_meta = stream_out(
+                self.llm.stream_chat_oai(conversations=conversations, delta_mode=True),
+                model_name=self.llm.default_model_name,
+                title=title,
+                display_func=extract_command_response
+            )
+
+            if last_meta:
+                elapsed_time = time.monotonic() - start_time
+                printer = Printer()
+                speed = last_meta.generated_tokens_count / elapsed_time
+
+                # Get model info for pricing
+                from autocoder.utils import llms as llm_utils
+                model_info = llm_utils.get_model_info(model_name, self.args.product_mode) or {}
+                input_price = model_info.get("input_price", 0.0) if model_info else 0.0
+                output_price = model_info.get("output_price", 0.0) if model_info else 0.0
+
+                # Calculate costs
+                input_cost = (last_meta.input_tokens_count * input_price) / 1000000  # Convert to millions
+                output_cost = (last_meta.generated_tokens_count * output_price) / 1000000  # Convert to millions
+
+                printer.print_in_terminal("stream_out_stats",
+                                          model_name=",".join(llms_utils.get_llm_names(self.llm)),
+                                          elapsed_time=elapsed_time,
+                                          first_token_time=last_meta.first_token_time,
+                                          input_tokens=last_meta.input_tokens_count,
+                                          output_tokens=last_meta.generated_tokens_count,
+                                          input_cost=round(input_cost, 4),
+                                          output_cost=round(output_cost, 4),
+                                          speed=round(speed, 2))
+
+
+            # 提取 JSON 并转换为 AutoConfigResponse
+            response = to_model(result, AutoConfigResponse)
+
+            # 保存对话记录
+            save_to_memory_file(
+                query=request.query,
+                response=response.model_dump_json(indent=2)
+            )
+
+            content = response.reasoning or "success"
+            for config in response.configs:
+                for k, v in config["config"].items():
+                    self.configure(f"{k}:{v}")
+                    content += f"\nconf({k}:{v})"
+
+            result_manager = ResultManager()
+
+            result_manager.add_result(content=content, meta={
+                "action": "help",
+                "input": {
+                    "query": request.query
+                }
+            })
+            return response
+        except Exception as e:
+            v = f"help error: {str(e)} {traceback.format_exc()}"
+            logger.error(v)
+            result_manager.add_result(content=v, meta={
+                "action": "help",
+                "input": {
+                    "query": request.query
+                }
+            })
+            return AutoConfigResponse()
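Two details in tune stand out. First, the stats branch reads self.args.product_mode, but __init__ never sets self.args on ConfigAutoTuner, so that path appears to rely on the surrounding except handler. Second, costs are derived from per-million-token prices; the same arithmetic in isolation, with hypothetical token counts and prices:

```python
input_tokens = 4_200    # hypothetical usage for one tuning call
output_tokens = 850
input_price = 2.0       # hypothetical price per million input tokens
output_price = 6.0      # hypothetical price per million output tokens

input_cost = (input_tokens * input_price) / 1_000_000     # 0.0084
output_cost = (output_tokens * output_price) / 1_000_000  # 0.0051
print(round(input_cost, 4), round(output_cost, 4))        # 0.0084 0.0051
```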
autocoder/common/code_auto_merge.py

@@ -157,7 +157,7 @@ class CodeAutoMerge:
         # get the file name
         file_name = os.path.basename(self.args.file)
 
-        if not force_skip_git:
+        if not force_skip_git and not self.args.skip_commit:
             try:
                 git_utils.commit_changes(self.args.source_dir, f"auto_coder_pre_{file_name}_{md5}")
             except Exception as e:
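The same one-line change lands in all four merge strategies in this release (the hunks that follow repeat it): the automatic pre- and post-merge commits now also honor a skip_commit option. The gating reduces to the following, with an illustrative signature rather than the classes' real one:

```python
def should_auto_commit(force_skip_git: bool, skip_commit: bool) -> bool:
    # Commit only when neither the caller nor the configuration opts out.
    return not force_skip_git and not skip_commit

assert should_auto_commit(False, False) is True
assert should_auto_commit(True, False) is False
assert should_auto_commit(False, True) is False  # new behavior in 0.1.261
```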
@@ -176,6 +176,6 @@ class CodeAutoMerge:
             f.write(block.content)
 
         self.printer.print_in_terminal("files_merged", total=total)
-        if not force_skip_git:
+        if not force_skip_git and not self.args.skip_commit:
             commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}")
             git_utils.print_commit_info(commit_result=commit_result)
autocoder/common/code_auto_merge_diff.py

@@ -511,7 +511,7 @@ class CodeAutoMergeDiff:
         # get the file name
         file_name = os.path.basename(self.args.file)
 
-        if not force_skip_git:
+        if not force_skip_git and not self.args.skip_commit:
             try:
                 git_utils.commit_changes(self.args.source_dir, f"auto_coder_pre_{file_name}_{md5}")
             except Exception as e:
@@ -522,6 +522,6 @@ class CodeAutoMergeDiff:
         self.apply_edits(edits)
 
         self.printer.print_in_terminal("files_merged_total", total=total)
-        if not force_skip_git:
+        if not force_skip_git and not self.args.skip_commit:
             commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}")
             git_utils.print_commit_info(commit_result=commit_result)
autocoder/common/code_auto_merge_editblock.py

@@ -360,7 +360,7 @@ class CodeAutoMergeEditBlock:
                 file_path=file_path,
                 error_message=error_message)
 
-        if changes_made and not force_skip_git:
+        if changes_made and not force_skip_git and not self.args.skip_commit:
             try:
                 git_utils.commit_changes(
                     self.args.source_dir, f"auto_coder_pre_{file_name}_{md5}"
@@ -400,7 +400,7 @@ class CodeAutoMergeEditBlock:
             )
 
         if changes_made:
-            if not force_skip_git:
+            if not force_skip_git and not self.args.skip_commit:
                 try:
                     commit_result = git_utils.commit_changes(
                         self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}"
autocoder/common/code_auto_merge_strict_diff.py

@@ -209,7 +209,7 @@ class CodeAutoMergeStrictDiff:
         # get the file name
         file_name = os.path.basename(self.args.file)
 
-        if not force_skip_git:
+        if not force_skip_git and not self.args.skip_commit:
             try:
                 git_utils.commit_changes(self.args.source_dir, f"auto_coder_pre_{file_name}_{md5}")
             except Exception as e:
@@ -232,7 +232,7 @@ class CodeAutoMergeStrictDiff:
             raise Exception("Error applying diff to file: " + path)
 
         self.printer.print_in_terminal("files_merged_total", total=total)
-        if not force_skip_git:
+        if not force_skip_git and not self.args.skip_commit:
             commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}")
             git_utils.print_commit_info(commit_result=commit_result)
 
autocoder/common/code_modification_ranker.py

@@ -18,10 +18,11 @@ class CodeModificationRanker:
     def __init__(self, llm: byzerllm.ByzerLLM, args: AutoCoderArgs):
         self.llm = llm
         self.args = args
-        self.llms = self.llm.get_sub_client(
-
+        self.llms = self.llm.get_sub_client("generate_rerank_model") or [self.llm]
+
         if not isinstance(self.llms, list):
             self.llms = [self.llms]
+
         self.printer = Printer()
 
     @byzerllm.prompt()
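The ranker now falls back to the primary llm when no generate_rerank_model sub-client is configured, instead of assuming one exists. The shape of that fallback, sketched with a plain dict standing in for byzerllm's sub-client registry:

```python
def resolve_rank_llms(primary, sub_clients: dict) -> list:
    # An empty or missing lookup falls back to the primary model,
    # and a single client is normalized to a one-element list.
    llms = sub_clients.get("generate_rerank_model") or [primary]
    if not isinstance(llms, list):
        llms = [llms]
    return llms

assert resolve_rank_llms("main", {}) == ["main"]
assert resolve_rank_llms("main", {"generate_rerank_model": "reranker"}) == ["reranker"]
```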
@@ -65,9 +66,7 @@ class CodeModificationRanker:
         if len(generate_result.contents) == 1:
             self.printer.print_in_terminal("ranking_skip", style="blue")
             return generate_result
-
-        self.printer.print_in_terminal(
-            "ranking_start", style="blue", count=len(generate_result.contents))
+
         rank_times = self.args.rank_times_same_model
         total_tasks = len(self.llms) * rank_times
 
@@ -79,7 +78,7 @@ class CodeModificationRanker:
         with ThreadPoolExecutor(max_workers=total_tasks) as executor:
             # Submit tasks for each model and generate_times
             futures = []
-            for llm in self.llms:
+            for llm in self.llms:
                 model_name = ",".join(get_llm_names(llm))
                 self.printer.print_in_terminal(
                     "ranking_start", style="blue", count=len(generate_result.contents), model_name=model_name)
@@ -159,6 +158,7 @@ class CodeModificationRanker:
             reverse=True)
 
         elapsed = time.time() - start_time
+        speed = generated_tokens_count / elapsed
         # Format scores for logging
         score_details = ", ".join(
             [f"candidate {i}: {candidate_scores[i]:.2f}" for i in sorted_candidates])
@@ -173,7 +173,8 @@ class CodeModificationRanker:
             output_tokens=generated_tokens_count,
             input_cost=total_input_cost,
             output_cost=total_output_cost,
-            model_names=", ".join(model_names)
+            model_names=", ".join(model_names),
+            speed=f"{speed:.2f}"
         )
 
         rerank_contents = [generate_result.contents[i]