auto-coder 0.1.251__py3-none-any.whl → 0.1.252__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder has been flagged as possibly problematic.
- {auto_coder-0.1.251.dist-info → auto_coder-0.1.252.dist-info}/METADATA +2 -2
- {auto_coder-0.1.251.dist-info → auto_coder-0.1.252.dist-info}/RECORD +26 -24
- autocoder/auto_coder.py +28 -4
- autocoder/auto_coder_rag.py +198 -35
- autocoder/chat_auto_coder.py +56 -3
- autocoder/chat_auto_coder_lang.py +21 -3
- autocoder/common/__init__.py +1 -0
- autocoder/common/auto_coder_lang.py +6 -4
- autocoder/common/code_modification_ranker.py +3 -3
- autocoder/common/global_cancel.py +21 -0
- autocoder/dispacher/actions/action.py +29 -8
- autocoder/dispacher/actions/plugins/action_regex_project.py +17 -5
- autocoder/index/filter/quick_filter.py +4 -6
- autocoder/index/index.py +13 -6
- autocoder/models.py +87 -6
- autocoder/rag/doc_filter.py +1 -3
- autocoder/rag/long_context_rag.py +7 -5
- autocoder/rag/token_limiter.py +1 -3
- autocoder/utils/auto_coder_utils/chat_stream_out.py +13 -2
- autocoder/utils/llms.py +15 -1
- autocoder/utils/thread_utils.py +201 -0
- autocoder/version.py +1 -1
- {auto_coder-0.1.251.dist-info → auto_coder-0.1.252.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.251.dist-info → auto_coder-0.1.252.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.251.dist-info → auto_coder-0.1.252.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.251.dist-info → auto_coder-0.1.252.dist-info}/top_level.txt +0 -0
@@ -85,7 +85,7 @@ MESSAGES = {
         "design_desc": "Generate SVG image based on the provided description",
         "commit_desc": "Auto generate yaml file and commit changes based on user's manual changes",
         "models_desc": "Manage model configurations, only available in lite mode",
-        "models_usage": "Usage: /models /list|/add|/add_model|/remove ...",
+        "models_usage": "Usage: /models /list|/add|/add_model|/remove|/price|/speed ...",
         "models_added": "Added/Updated model '{{name}}' successfully.",
         "models_add_failed": "Failed to add model '{{name}}'. Model not found in defaults.",
         "models_add_usage": "Usage: /models /add <name> <api_key> or\n/models /add <name> <model_type> <model_name> <base_url> <api_key_path> [description]",
@@ -96,6 +96,14 @@ MESSAGES = {
         "models_add_model_remove": "Model '{{name}}' not found.",
         "models_add_model_removed": "Removed model: {{name}}",
         "models_unknown_subcmd": "Unknown subcommand: {{subcmd}}",
+        "models_input_price_updated": "Updated input price for model {{name}} to {{price}} M/token",
+        "models_output_price_updated": "Updated output price for model {{name}} to {{price}} M/token",
+        "models_invalid_price": "Invalid price value: {{error}}",
+        "models_input_price_usage": "Usage: /models /input_price <name> <value>",
+        "models_output_price_usage": "Usage: /models /output_price <name> <value>",
+        "models_speed_updated": "Updated speed for model {{name}} to {{speed}} s/request",
+        "models_invalid_speed": "Invalid speed value: {{error}}",
+        "models_speed_usage": "Usage: /models /speed <name> <value>",
         "models_title": "All Models (内置 + models.json)",
         "models_no_models": "No models found.",
         "models_lite_only": "The /models command is only available in lite mode",
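The new message keys above describe two additional /models subcommand families, for pricing and for average speed. A hypothetical session in the chat CLI, with the model name and numbers purely illustrative and the syntax taken from the usage strings:

/models /input_price deepseek_chat 2.0
/models /output_price deepseek_chat 8.0
/models /speed deepseek_chat 1.5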
@@ -117,6 +125,7 @@ MESSAGES = {
         "commit_message": "{{ model_name }} Generated commit message: {{ message }}",
         "commit_failed": "{{ model_name }} Failed to generate commit message: {{ error }}",
         "confirm_execute": "Do you want to execute this script?",
+        "official_doc": "Official Documentation: https://uelng8wukz.feishu.cn/wiki/NhPNwSRcWimKFIkQINIckloBncI",
     },
     "zh": {
         "commit_generating": "{{ model_name }} 正在生成提交信息...",
@@ -204,7 +213,7 @@ MESSAGES = {
         "conf_value": "值",
         "conf_title": "配置设置",
         "conf_subtitle": "使用 /conf <key>:<value> 修改这些设置",
-        "models_usage": "用法: /models /list|/add|/add_model|/remove ...",
+        "models_usage": "用法: /models /list|/add|/add_model|/remove|/price|/speed ...",
         "models_added": "成功添加/更新模型 '{{name}}'。",
         "models_add_failed": "添加模型 '{{name}}' 失败。在默认模型中未找到该模型。",
         "models_add_usage": "用法: /models /add <name> <api_key> 或\n/models /add <name> <model_type> <model_name> <base_url> <api_key_path> [description]",
@@ -215,6 +224,14 @@ MESSAGES = {
         "models_add_model_remove": "找不到模型 '{{name}}'。",
         "models_add_model_removed": "已移除模型: {{name}}",
         "models_unknown_subcmd": "未知的子命令: {{subcmd}}",
+        "models_input_price_updated": "已更新模型 {{name}} 的输入价格为 {{price}} M/token",
+        "models_output_price_updated": "已更新模型 {{name}} 的输出价格为 {{price}} M/token",
+        "models_invalid_price": "无效的价格值: {{error}}",
+        "models_input_price_usage": "用法: /models /input_price <name> <value>",
+        "models_output_price_usage": "用法: /models /output_price <name> <value>",
+        "models_speed_updated": "已更新模型 {{name}} 的速度为 {{speed}} 秒/请求",
+        "models_invalid_speed": "无效的速度值: {{error}}",
+        "models_speed_usage": "用法: /models /speed <name> <value>",
         "models_title": "所有模型 (内置 + models.json)",
         "models_no_models": "未找到任何模型。",
         "models_lite_only": "/models 命令仅在 lite 模式下可用",
@@ -232,7 +249,8 @@ MESSAGES = {
         "remove_files_none": "没有文件被移除。",
         "files_removed": "移除的文件",
         "models_api_key_empty": "警告: {{name}} API key 为空。请设置一个有效的 API key。",
-        "confirm_execute": "
+        "confirm_execute": "是否执行此脚本?",
+        "official_doc": "官方文档: https://uelng8wukz.feishu.cn/wiki/NhPNwSRcWimKFIkQINIckloBncI",
     }
 }
 
autocoder/common/__init__.py CHANGED
@@ -254,6 +254,7 @@ class AutoCoderArgs(pydantic.BaseModel):
     planner_model: Optional[str] = ""
     voice2text_model: Optional[str] = ""
     text2voice_model: Optional[str] = ""
+    commit_model: Optional[str] = ""
 
     skip_build_index: Optional[bool] = False
     skip_filter_index: Optional[bool] = False
@@ -3,6 +3,7 @@ from byzerllm.utils import format_str_jinja2
 
 MESSAGES = {
     "en": {
+        "generation_cancelled": "[Interrupted] Generation cancelled",
         "model_not_found": "Model {{model_name}} not found",
         "generating_shell_script": "Generating Shell Script",
         "new_session_started": "New session started. Previous chat history has been archived.",
@@ -51,7 +52,7 @@ MESSAGES = {
             "Paste the answer to the input box below, use '/break' to exit, '/clear' to clear the screen, '/eof' to submit."
         ),
         "code_generation_start": "Auto generate the code...",
-        "code_generation_complete": "Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}",
+        "code_generation_complete": "Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}, speed: {{ speed }} tokens/s",
         "code_merge_start": "Auto merge the code...",
         "code_execution_warning": "Content(send to model) is {{ content_length }} tokens (you may collect too much files), which is larger than the maximum input length {{ max_length }}",
         "quick_filter_start": "{{ model_name }} Starting filter context(quick_filter)...",
@@ -73,7 +74,7 @@ MESSAGES = {
         "ranking_process_failed": "Ranking process failed: {{ error }}",
         "ranking_failed": "Ranking failed in {{ elapsed }}s, using original order",
         "begin_index_source_code": "🚀 Begin to index source code in {{ source_dir }}",
-        "stream_out_stats": "Elapsed time {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
+        "stream_out_stats": "Elapsed time {{ elapsed_time }} seconds, first token time: {{ first_token_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}, speed: {{ speed }} tokens/s",
         "quick_filter_stats": "快速过滤器完成,耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
         "upsert_file": "✅ Updated file: {{ file_path }}",
         "unmerged_blocks_title": "Unmerged Blocks",
@@ -91,6 +92,7 @@ MESSAGES = {
         "estimated_input_tokens_in_generate": "Estimated input tokens in generate ({{ generate_mode }}): {{ estimated_input_tokens }}",
     },
     "zh": {
+        "generation_cancelled": "[已中断] 生成已取消",
         "model_not_found": "未找到模型: {{model_name}}",
         "generating_shell_script": "正在生成 Shell 脚本",
         "new_session_started": "新会话已开始。之前的聊天历史已存档。",
@@ -139,7 +141,7 @@ MESSAGES = {
             "将获得答案黏贴到下面的输入框,换行后,使用 '/break' 退出,'/clear' 清屏,'/eof' 提交。"
         ),
         "code_generation_start": "正在自动生成代码...",
-        "code_generation_complete": "代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
+        "code_generation_complete": "代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 速度: {{ speed }} tokens/秒",
         "code_merge_start": "正在自动合并代码...",
         "code_execution_warning": "发送给模型的内容长度为 {{ content_length }} tokens(您可能收集了太多文件),超过了最大输入长度 {{ max_length }}",
         "quick_filter_start": "{{ model_name }} 开始查找上下文(quick_filter)...",
@@ -171,7 +173,7 @@ MESSAGES = {
         "ranking_complete": "排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }}",
         "ranking_process_failed": "排序过程失败: {{ error }}",
         "ranking_failed": "排序失败,耗时 {{ elapsed }} 秒,使用原始顺序",
-        "stream_out_stats": "
+        "stream_out_stats": "总耗时 {{ elapsed_time }} 秒,首token时间: {{ first_token_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 速度: {{ speed }} tokens/秒",
         "quick_filter_stats": "Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
         "quick_filter_title": "{{ model_name }} 正在分析如何筛选上下文...",
         "quick_filter_failed": "❌ 快速过滤器失败: {{ error }}. ",
@@ -8,6 +8,8 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
 import traceback
 from autocoder.common.utils_code_auto_generate import chat_with_continue
 from byzerllm.utils.str2model import to_model
+
+from autocoder.utils.llms import get_llm_names
 class RankResult(BaseModel):
     rank_result: List[int]
 
@@ -78,9 +80,7 @@ class CodeModificationRanker:
         # Submit tasks for each model and generate_times
         futures = []
         for llm in self.llms:
-            model_name =
-            if not model_name:
-                model_name = "unknown(without default model name)"
+            model_name = ",".join(get_llm_names(llm))
             self.printer.print_in_terminal(
                 "ranking_start", style="blue", count=len(generate_result.contents), model_name=model_name)
 
@@ -0,0 +1,21 @@
+import threading
+
+class GlobalCancel:
+    def __init__(self):
+        self._flag = False
+        self._lock = threading.Lock()
+
+    @property
+    def requested(self):
+        with self._lock:
+            return self._flag
+
+    def set(self):
+        with self._lock:
+            self._flag = True
+
+    def reset(self):
+        with self._lock:
+            self._flag = False
+
+global_cancel = GlobalCancel()
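The new file above introduces global_cancel, a process-wide, thread-safe cancellation flag that the indexer and stream output poll in later hunks. A minimal usage sketch; the worker loop below is hypothetical, and only global_cancel with its set/reset/requested API comes from the diff:

import time
from autocoder.common.global_cancel import global_cancel

def worker(items):
    # Cooperative cancellation: each iteration checks the shared flag.
    for item in items:
        if global_cancel.requested:
            break
        time.sleep(0.1)  # stand-in for real per-item work

global_cancel.set()    # e.g. from a Ctrl+C handler in another thread
worker(["a", "b"])     # returns immediately because the flag is already set
global_cancel.reset()  # clear the flag before the next run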
@@ -26,6 +26,7 @@ from autocoder.utils.conversation_store import store_code_model_conversation
 from loguru import logger
 import time
 from autocoder.common.printer import Printer
+from autocoder.utils.llms import get_llm_names
 
 
 class BaseAction:
@@ -123,11 +124,16 @@ class ActionTSProject(BaseAction):
         generate_result = generate.single_round_run(
             query=args.query, source_content=content
         )
+        elapsed_time = time.time() - start_time
+        speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
+        model_names = ",".join(get_llm_names(self.llm))
         self.printer.print_in_terminal(
             "code_generation_complete",
-            duration=
+            duration=elapsed_time,
             input_tokens=generate_result.metadata.get('input_tokens_count', 0),
-            output_tokens=generate_result.metadata.get('generated_tokens_count', 0)
+            output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+            speed=round(speed, 2),
+            model_names=model_names
         )
         merge_result = None
         if args.execute and args.auto_merge:
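The same elapsed-time and speed bookkeeping recurs in every Action class below. A quick check of the arithmetic with made-up numbers:

elapsed_time = 8.5        # seconds between start_time and completion
generated_tokens = 1200   # metadata['generated_tokens_count']
speed = generated_tokens / elapsed_time if elapsed_time > 0 else 0
print(round(speed, 2))    # 141.18, rendered as "speed: 141.18 tokens/s"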
@@ -213,11 +219,16 @@ class ActionPyScriptProject(BaseAction):
             query=args.query, source_content=content
         )
 
+        elapsed_time = time.time() - start_time
+        speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
+        model_names = ",".join(get_llm_names(self.llm))
         self.printer.print_in_terminal(
             "code_generation_complete",
-            duration=
+            duration=elapsed_time,
             input_tokens=generate_result.metadata.get('input_tokens_count', 0),
-            output_tokens=generate_result.metadata.get('generated_tokens_count', 0)
+            output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+            speed=round(speed, 2),
+            model_names=model_names
         )
         merge_result = None
         if args.execute and args.auto_merge:
@@ -335,11 +346,16 @@ class ActionPyProject(BaseAction):
         generate_result = generate.single_round_run(
             query=args.query, source_content=content
         )
+        elapsed_time = time.time() - start_time
+        speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
+        model_names = ",".join(get_llm_names(self.llm))
         self.printer.print_in_terminal(
             "code_generation_complete",
-            duration=
+            duration=elapsed_time,
             input_tokens=generate_result.metadata.get('input_tokens_count', 0),
-            output_tokens=generate_result.metadata.get('generated_tokens_count', 0)
+            output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+            speed=round(speed, 2),
+            model_names=model_names
         )
         merge_result = None
         if args.execute and args.auto_merge:
@@ -440,11 +456,16 @@ class ActionSuffixProject(BaseAction):
             query=args.query, source_content=content
         )
 
+        elapsed_time = time.time() - start_time
+        speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
+        model_names = ",".join(get_llm_names(self.llm))
         self.printer.print_in_terminal(
             "code_generation_complete",
-            duration=
+            duration=elapsed_time,
             input_tokens=generate_result.metadata.get('input_tokens_count', 0),
-            output_tokens=generate_result.metadata.get('generated_tokens_count', 0)
+            output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+            speed=round(speed, 2),
+            model_names=model_names
         )
         merge_result = None
         if args.execute and args.auto_merge:
@@ -12,9 +12,10 @@ from autocoder.common.code_auto_generate_editblock import CodeAutoGenerateEditBlock
 from autocoder.index.entry import build_index_and_filter_files
 from autocoder.regexproject import RegexProject
 from autocoder.utils.conversation_store import store_code_model_conversation
-from
+from autocoder.common.printer import Printer
 import time
-
+from autocoder.utils.llms import get_llm_names
+from loguru import logger
 class ActionRegexProject:
     def __init__(
         self, args: AutoCoderArgs, llm: Optional[byzerllm.ByzerLLM] = None
@@ -22,6 +23,7 @@ class ActionRegexProject:
         self.args = args
         self.llm = llm
         self.pp = None
+        self.printer = Printer()
 
     def run(self):
         args = self.args
@@ -58,7 +60,7 @@ class ActionRegexProject:
 
         start_time = time.time()
         if args.execute:
-
+            self.printer.print_in_terminal("code_generation_start")
 
         if args.auto_merge == "diff":
             generate = CodeAutoGenerateDiff(
@@ -83,10 +85,20 @@ class ActionRegexProject:
             query=args.query, source_content=content
         )
 
-
+        elapsed_time = time.time() - start_time
+        speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
+        model_names = ",".join(get_llm_names(self.llm))
+        self.printer.print_in_terminal(
+            "code_generation_complete",
+            duration=elapsed_time,
+            input_tokens=generate_result.metadata.get('input_tokens_count', 0),
+            output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+            speed=round(speed, 2),
+            model_names=model_names
+        )
         merge_result = None
         if args.execute and args.auto_merge:
-
+            self.printer.print_in_terminal("code_merge_start")
         if args.auto_merge == "diff":
             code_merge = CodeAutoMergeDiff(llm=self.llm, args=self.args)
             merge_result = code_merge.merge_code(generate_result=generate_result)
@@ -17,6 +17,8 @@ from autocoder.common.printer import Printer
 from concurrent.futures import ThreadPoolExecutor
 import threading
 
+from autocoder.utils.llms import get_llm_names
+
 
 def get_file_path(file_path):
     if file_path.startswith("##"):
@@ -70,9 +72,7 @@ class QuickFilter():
 
         def process_chunk(chunk_index: int, chunk: List[IndexItem]) -> None:
             try:
-                model_name =
-                if not model_name:
-                    model_name = "unknown(without default model name)"
+                model_name = ",".join(get_llm_names(self.index_manager.index_filter_llm))
 
                 if chunk_index == 0:
                     # 第一个chunk使用流式输出
@@ -180,9 +180,7 @@ class QuickFilter():
             return self.big_filter(index_items)
 
         try:
-            model_name =
-            if not model_name:
-                model_name = "unknown(without default model name)"
+            model_name = ",".join(get_llm_names(self.index_manager.index_filter_llm))
 
             # 渲染 Prompt 模板
             query = self.quick_filter_files.prompt(index_items, self.args.query)
autocoder/index/index.py CHANGED
@@ -22,7 +22,8 @@ from autocoder.index.types import (
     TargetFile,
     FileList,
 )
-
+from autocoder.common.global_cancel import global_cancel
+from autocoder.utils.llms import get_llm_names
 class IndexManager:
     def __init__(
         self, llm: byzerllm.ByzerLLM, sources: List[SourceCode], args: AutoCoderArgs
@@ -195,7 +196,10 @@ class IndexManager:
             return True
         return False
 
-    def build_index_for_single_source(self, source: SourceCode):
+    def build_index_for_single_source(self, source: SourceCode):
+        if global_cancel.requested:
+            return None
+
         file_path = source.module_name
         if not os.path.exists(file_path):
             return None
@@ -205,9 +209,7 @@ class IndexManager:
 
         md5 = hashlib.md5(source.source_code.encode("utf-8")).hexdigest()
 
-        model_name =
-        if not model_name:
-            model_name = "unknown(without default model name)"
+        model_name = ",".join(get_llm_names(self.index_llm))
 
         try:
             start_time = time.monotonic()
@@ -314,6 +316,9 @@ class IndexManager:
         ):
             wait_to_build_files.append(source)
 
+        # Remove duplicates based on module_name
+        wait_to_build_files = list({source.module_name: source for source in wait_to_build_files}.values())
+
         counter = 0
         num_files = len(wait_to_build_files)
         total_files = len(self.sources)
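The added dict comprehension is the usual order-preserving dedup idiom: keyed on module_name, a later duplicate replaces an earlier one while keeping the first occurrence's position. A toy illustration, where Src is a stand-in for SourceCode:

class Src:
    def __init__(self, module_name):
        self.module_name = module_name

files = [Src("a.py"), Src("b.py"), Src("a.py")]
deduped = list({s.module_name: s for s in files}.values())
print([s.module_name for s in deduped])  # ['a.py', 'b.py']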
@@ -329,6 +334,8 @@ class IndexManager:
             for source in wait_to_build_files
         ]
         for future in as_completed(futures):
+            if global_cancel.requested:
+                break
             result = future.result()
             if result is not None:
                 counter += 1
@@ -345,7 +352,7 @@ class IndexManager:
         with open(self.index_file, "w") as file:
             json.dump(index_data, file, ensure_ascii=False, indent=2)
         updated_sources = []
-
+
         # 如果 updated_sources 或 keys_to_remove 有值,则保存索引文件
         if updated_sources or keys_to_remove:
             with open(self.index_file, "w") as file:
autocoder/models.py CHANGED
@@ -2,7 +2,6 @@ import os
 import json
 from typing import List, Dict
 from urllib.parse import urlparse
-from autocoder.common.auto_coder_lang import get_message_with_format
 
 MODELS_JSON = os.path.expanduser("~/.auto-coder/keys/models.json")
 
@@ -15,7 +14,10 @@ default_models_list = [
         "model_type": "saas/openai",
         "base_url": "https://api.deepseek.com/v1",
         "api_key_path": "api.deepseek.com",
-        "is_reasoning": True
+        "is_reasoning": True,
+        "input_price": 0.0,  # 单位:M/百万 input tokens
+        "output_price": 0.0,  # 单位:M/百万 output tokens
+        "average_speed": 0.0  # 单位:秒/请求
     },
     {
         "name": "deepseek_chat",
@@ -24,7 +26,10 @@ default_models_list = [
         "model_type": "saas/openai",
         "base_url": "https://api.deepseek.com/v1",
         "api_key_path": "api.deepseek.com",
-        "is_reasoning": False
+        "is_reasoning": False,
+        "input_price": 0.0,
+        "output_price": 0.0,
+        "average_speed": 0.0
     },
     {
         "name":"o1",
@@ -33,7 +38,10 @@ default_models_list = [
         "model_type": "saas/openai",
         "base_url": "https://api.openai.com/v1",
         "api_key_path": "",
-        "is_reasoning": True
+        "is_reasoning": True,
+        "input_price": 0.0,
+        "output_price": 0.0,
+        "average_speed": 0.0
     }
 ]
 
@@ -106,6 +114,7 @@ def get_model_by_name(name: str) -> Dict:
     """
     根据模型名称查找模型
     """
+    from autocoder.common.auto_coder_lang import get_message_with_format
     models = load_models()
     v = [m for m in models if m["name"] == name.strip()]
 
@@ -114,6 +123,78 @@ def get_model_by_name(name: str) -> Dict:
     return v[0]
 
 
+def update_model_input_price(name: str, price: float) -> bool:
+    """更新模型输入价格
+
+    Args:
+        name: 模型名称
+        price: 输入价格(M/百万input tokens)
+
+    Returns:
+        bool: 是否更新成功
+    """
+    if price < 0:
+        raise ValueError("Price cannot be negative")
+
+    models = load_models()
+    updated = False
+    for model in models:
+        if model["name"] == name:
+            model["input_price"] = float(price)
+            updated = True
+            break
+    if updated:
+        save_models(models)
+    return updated
+
+def update_model_output_price(name: str, price: float) -> bool:
+    """更新模型输出价格
+
+    Args:
+        name: 模型名称
+        price: 输出价格(M/百万output tokens)
+
+    Returns:
+        bool: 是否更新成功
+    """
+    if price < 0:
+        raise ValueError("Price cannot be negative")
+
+    models = load_models()
+    updated = False
+    for model in models:
+        if model["name"] == name:
+            model["output_price"] = float(price)
+            updated = True
+            break
+    if updated:
+        save_models(models)
+    return updated
+
+def update_model_speed(name: str, speed: float) -> bool:
+    """更新模型平均速度
+
+    Args:
+        name: 模型名称
+        speed: 速度(秒/请求)
+
+    Returns:
+        bool: 是否更新成功
+    """
+    if speed <= 0:
+        raise ValueError("Speed must be positive")
+
+    models = load_models()
+    updated = False
+    for model in models:
+        if model["name"] == name:
+            model["average_speed"] = float(speed)
+            updated = True
+            break
+    if updated:
+        save_models(models)
+    return updated
+
 def check_model_exists(name: str) -> bool:
     """
     检查模型是否存在
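A hypothetical round trip through the three new updaters; the model name is illustrative, and all three persist via save_models to ~/.auto-coder/keys/models.json and return False for unknown names:

from autocoder import models

models.update_model_input_price("deepseek_chat", 2.0)   # price per million input tokens
models.update_model_output_price("deepseek_chat", 8.0)  # price per million output tokens
models.update_model_speed("deepseek_chat", 1.5)         # seconds per request; must be > 0
print(models.get_model_by_name("deepseek_chat")["input_price"])  # 2.0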
@@ -124,14 +205,14 @@ def check_model_exists(name: str) -> bool:
 def update_model_with_api_key(name: str, api_key: str) -> Dict:
     """
     根据模型名称查找并更新模型的 api_key_path。
-
+    如果找到模型,会根据其 base_url 处理 api_key_path。
 
     Args:
         name: 模型名称
         api_key: API密钥
 
     Returns:
-        Dict:
+        Dict: 更新后的模型信息,如果未找到则返回None
     """
     models = load_models()
 
autocoder/rag/doc_filter.py CHANGED
@@ -91,9 +91,7 @@ class DocFilter:
         def _run(conversations, docs):
             submit_time_1 = time.time()
             try:
-                llm =
-                llm.skip_nontext_check = True
-                llm.setup_default_model_name(self.recall_llm.default_model_name)
+                llm = self.recall_llm
 
                 v = (
                     _check_relevance_with_conversation.with_llm(
@@ -52,11 +52,13 @@ class LongContextRAG:
     ) -> None:
         self.llm = llm
         self.args = args
-
-
-
-
-
+        if args.product_mode == "pro":
+            self.index_model = byzerllm.ByzerLLM()
+            self.index_model.setup_default_model_name(
+                args.index_model or self.llm.default_model_name
+            )
+        else:
+            self.index_model = self.llm
 
         self.path = path
         self.relevant_score = self.args.rag_doc_filter_relevance or 5
autocoder/rag/token_limiter.py CHANGED
@@ -224,9 +224,7 @@ class TokenLimiter:
         for idx, line in enumerate(source_code_lines):
             source_code_with_line_number += f"{idx+1} {line}\n"
 
-        llm =
-        llm.skip_nontext_check = True
-        llm.setup_default_model_name(self.chunk_llm.default_model_name)
+        llm = self.chunk_llm
 
         extracted_info = (
             self.extract_relevance_range_from_docs_with_conversation.options(
@@ -1,4 +1,5 @@
 from rich.console import Console
+from autocoder.common.printer import Printer
 from rich.live import Live
 from rich.panel import Panel
 from rich.markdown import Markdown
@@ -11,6 +12,7 @@ from autocoder.utils.request_queue import request_queue
 import time
 from byzerllm.utils.types import SingleOutputMeta
 from autocoder.common import AutoCoderArgs
+from autocoder.common.global_cancel import global_cancel
 
 MAX_HISTORY_LINES = 40  # 最大保留历史行数
 
@@ -172,7 +174,9 @@ def stream_out(
     current_line = ""  # 当前行
     assistant_response = ""
     last_meta = None
-    panel_title = title if title is not None else f"Response[ {model_name} ]"
+    panel_title = title if title is not None else f"Response[ {model_name} ]"
+    first_token_time = 0.0
+    first_token_time_start = time.time()
     try:
         with Live(
             Panel("", title=panel_title, border_style="green"),
@@ -180,6 +184,10 @@ def stream_out(
             console=console
         ) as live:
             for res in stream_generator:
+                if global_cancel.requested:
+                    printer = Printer(console)
+                    printer.print_in_terminal("generation_cancelled")
+                    break
                 last_meta = res[1]
                 content = res[0]
                 reasoning_content = last_meta.reasoning_content
@@ -187,6 +195,9 @@ def stream_out(
                 if reasoning_content == "" and content == "":
                     continue
 
+                if first_token_time == 0.0:
+                    first_token_time = time.time() - first_token_time_start
+
                 if keep_reasoning_content:
                     # 处理思考内容
                     if reasoning_content:
@@ -280,5 +291,5 @@ def stream_out(
                 status=RequestOption.COMPLETED
             ),
         )
-
+    last_meta.first_token_time = first_token_time
     return assistant_response, last_meta
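These meta fields feed the updated stream_out_stats template shown earlier. Assuming speed is computed as output tokens over elapsed time, as in the Action classes above, a sample rendering with made-up values:

elapsed_time, first_token_time = 12.0, 0.8
input_tokens, output_tokens = 3000, 1500
speed = round(output_tokens / elapsed_time, 2)  # 125.0
# -> "Elapsed time 12.0 seconds, first token time: 0.8 seconds,
#     input tokens: 3000, output tokens: 1500, speed: 125.0 tokens/s"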
autocoder/utils/llms.py CHANGED
@@ -1,7 +1,21 @@
 import byzerllm
-from
+from typing import Union,Optional
+
+def get_llm_names(llm: Union[byzerllm.ByzerLLM, byzerllm.SimpleByzerLLM,str],target_model_type:Optional[str]=None):
+    if target_model_type is None:
+        return [llm.default_model_name for llm in [llm] if llm.default_model_name]
+    llms = llm.get_sub_client(target_model_type)
+    if llms is None:
+        return [llm.default_model_name for llm in [llm] if llm.default_model_name]
+    elif isinstance(llms, list):
+        return [llm.default_model_name for llm in llms if llm.default_model_name]
+    elif isinstance(llms,str) and llms:
+        return llms.split(",")
+    else:
+        return [llm.default_model_name for llm in [llms] if llm.default_model_name]
 
 def get_single_llm(model_names: str, product_mode: str):
+    from autocoder import models as models_module
     if product_mode == "pro":
         if "," in model_names:
             # Multiple code models specified
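A hedged sketch of the new helper in use; llm is assumed to be a configured ByzerLLM, the sub-client key is illustrative, and the fallback behavior follows the branches above:

from autocoder.utils.llms import get_llm_names

names = get_llm_names(llm)               # e.g. ['deepseek_chat'] from default_model_name
names = get_llm_names(llm, "some_type")  # resolves a registered sub client, else falls back
model_name = ",".join(names)             # the joined form used throughout this release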
|