auto-coder: auto_coder-0.1.251-py3-none-any.whl → auto_coder-0.1.253-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

This version of auto-coder has been flagged as potentially problematic.

Files changed (32)
  1. {auto_coder-0.1.251.dist-info → auto_coder-0.1.253.dist-info}/METADATA +2 -2
  2. {auto_coder-0.1.251.dist-info → auto_coder-0.1.253.dist-info}/RECORD +32 -30
  3. autocoder/auto_coder.py +32 -7
  4. autocoder/auto_coder_rag.py +198 -35
  5. autocoder/chat_auto_coder.py +73 -19
  6. autocoder/chat_auto_coder_lang.py +21 -3
  7. autocoder/common/__init__.py +2 -0
  8. autocoder/common/auto_coder_lang.py +6 -4
  9. autocoder/common/code_auto_merge.py +1 -1
  10. autocoder/common/code_auto_merge_diff.py +1 -1
  11. autocoder/common/code_auto_merge_editblock.py +1 -1
  12. autocoder/common/code_auto_merge_strict_diff.py +1 -1
  13. autocoder/common/code_modification_ranker.py +3 -3
  14. autocoder/common/global_cancel.py +21 -0
  15. autocoder/common/mcp_server.py +3 -4
  16. autocoder/common/mcp_servers/mcp_server_perplexity.py +1 -1
  17. autocoder/dispacher/actions/action.py +29 -8
  18. autocoder/dispacher/actions/plugins/action_regex_project.py +17 -5
  19. autocoder/index/filter/quick_filter.py +4 -6
  20. autocoder/index/index.py +13 -6
  21. autocoder/models.py +87 -6
  22. autocoder/rag/doc_filter.py +2 -4
  23. autocoder/rag/long_context_rag.py +8 -6
  24. autocoder/rag/token_limiter.py +1 -3
  25. autocoder/utils/auto_coder_utils/chat_stream_out.py +40 -17
  26. autocoder/utils/llms.py +15 -1
  27. autocoder/utils/thread_utils.py +201 -0
  28. autocoder/version.py +1 -1
  29. {auto_coder-0.1.251.dist-info → auto_coder-0.1.253.dist-info}/LICENSE +0 -0
  30. {auto_coder-0.1.251.dist-info → auto_coder-0.1.253.dist-info}/WHEEL +0 -0
  31. {auto_coder-0.1.251.dist-info → auto_coder-0.1.253.dist-info}/entry_points.txt +0 -0
  32. {auto_coder-0.1.251.dist-info → auto_coder-0.1.253.dist-info}/top_level.txt +0 -0
autocoder/chat_auto_coder.py

@@ -49,12 +49,12 @@ from autocoder.common.mcp_server import get_mcp_server, McpRequest, McpInstallRe
  import byzerllm
  from byzerllm.utils import format_str_jinja2
  from autocoder.common.memory_manager import get_global_memory_file_paths
- from autocoder import models
+ from autocoder import models as models_module
  import shlex
  from autocoder.utils.llms import get_single_llm
  import pkg_resources
  from autocoder.common.printer import Printer
- from byzerllm.utils.langutil import run_in_thread,run_in_raw_thread
+ from autocoder.utils.thread_utils import run_in_thread,run_in_raw_thread

  class SymbolItem(BaseModel):
      symbol_name: str
@@ -136,6 +136,8 @@ commands = [


  def show_help():
+     print(f"\033[1m{get_message('official_doc')}\033[0m")
+     print()
      print(f"\033[1m{get_message('supported_commands')}\033[0m")
      print()
      print(
@@ -1537,11 +1539,12 @@ def mcp(query: str):
      if os.path.exists(temp_yaml):
          os.remove(temp_yaml)

-     mcp_server = get_mcp_server()
+     mcp_server = get_mcp_server()
      response = mcp_server.send_request(
          McpRequest(
              query=query,
-             model=args.inference_model or args.model
+             model=args.inference_model or args.model,
+             product_mode=args.product_mode
          )
      )

@@ -1562,19 +1565,19 @@ def mcp(query: str):
      file_path = os.path.join(mcp_dir, f"{timestamp}.md")

      # Format response as markdown
-     markdown_content = f"# {printer.get_message_from_key('mcp_response_title')}\n\n{response.result}"
+     markdown_content = response.result

      # Save to file
      with open(file_path, "w", encoding="utf-8") as f:
          f.write(markdown_content)

-     # Print with markdown formatting
-     printer.print_panel(
-         Markdown(markdown_content),
-         text_options={"justify": "left"},
-         panel_options={
-             "border_style": "green"
-         }
+     console = Console()
+     console.print(
+         Panel(
+             Markdown(markdown_content, justify="left"),
+             title=printer.get_message_from_key('mcp_response_title'),
+             border_style="green"
+         )
      )

@@ -1713,7 +1716,7 @@ def commit(query: str):
      if os.path.exists(temp_yaml):
          os.remove(temp_yaml)

-     target_model = args.code_model or args.model
+     target_model = args.commit_model or args.model
      llm = get_single_llm(target_model, product_mode)
      printer = Printer()
      printer.print_in_terminal("commit_generating", style="yellow", model_name=target_model)
@@ -1739,7 +1742,7 @@ def commit(query: str):
      md5 = hashlib.md5(file_content.encode("utf-8")).hexdigest()
      file_name = os.path.basename(execute_file)
      commit_result = git_utils.commit_changes(
-         ".", f"auto_coder_{file_name}_{md5}"
+         ".", f"auto_coder_{file_name}_{md5}\n{commit_message}"
      )
      git_utils.print_commit_info(commit_result=commit_result)
      if commit_message:
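
Note on the commit-message hunks (this one and the matching changes in the four code_auto_merge_* classes further down): git treats everything before the first newline as the commit subject and the rest as the body, so appending "\n{commit_message}" keeps the machine-readable auto_coder_{file_name}_{md5} marker as the subject while the generated text becomes the body. A quick way to see the split (placeholders, not real values):

    git log -1 --format=%s   # auto_coder_<file_name>_<md5>  (subject: marker, unchanged)
    git log -1 --format=%b   # body: the generated commit message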
@@ -2171,7 +2174,7 @@ def manage_models(params, query: str):
          printer.print_in_terminal("models_lite_only", style="red")
          return

-     models_data = models.load_models()
+     models_data = models_module.load_models()
      subcmd = ""
      if "/list" in query:
          subcmd = "/list"
@@ -2208,6 +2211,9 @@ def manage_models(params, query: str):
              table.add_column("Name", style="cyan", width=40, no_wrap=False)
              table.add_column("Model Name", style="magenta", width=30, overflow="fold")
              table.add_column("Base URL", style="white", width=50, overflow="fold")
+             table.add_column("Input Price (M)", style="magenta", width=15)
+             table.add_column("Output Price (M)", style="magenta", width=15)
+             table.add_column("Speed (s/req)", style="blue", width=15)
              for m in models_data:
                  # Check if api_key_path exists and file exists
                  is_api_key_set = "api_key" in m
@@ -2221,19 +2227,67 @@ def manage_models(params, query: str):
                  table.add_row(
                      name,
                      m.get("model_name", ""),
-                     m.get("base_url", "")
+                     m.get("base_url", ""),
+                     f"{m.get('input_price', 0.0):.2f}",
+                     f"{m.get('output_price', 0.0):.2f}",
+                     f"{m.get('average_speed', 0.0):.3f}"
                  )
              console.print(table)
          else:
              printer.print_in_terminal("models_no_models", style="yellow")

+     elif subcmd == "/input_price":
+         args = query.strip().split()
+         if len(args) >= 2:
+             name = args[0]
+             try:
+                 price = float(args[1])
+                 if models_module.update_model_input_price(name, price):
+                     printer.print_in_terminal("models_input_price_updated", style="green", name=name, price=price)
+                 else:
+                     printer.print_in_terminal("models_not_found", style="red", name=name)
+             except ValueError as e:
+                 printer.print_in_terminal("models_invalid_price", style="red", error=str(e))
+         else:
+             printer.print_in_terminal("models_input_price_usage", style="red")
+
+     elif subcmd == "/output_price":
+         args = query.strip().split()
+         if len(args) >= 2:
+             name = args[0]
+             try:
+                 price = float(args[1])
+                 if models_module.update_model_output_price(name, price):
+                     printer.print_in_terminal("models_output_price_updated", style="green", name=name, price=price)
+                 else:
+                     printer.print_in_terminal("models_not_found", style="red", name=name)
+             except ValueError as e:
+                 printer.print_in_terminal("models_invalid_price", style="red", error=str(e))
+         else:
+             printer.print_in_terminal("models_output_price_usage", style="red")
+
+     elif subcmd == "/speed":
+         args = query.strip().split()
+         if len(args) >= 2:
+             name = args[0]
+             try:
+                 speed = float(args[1])
+                 if models_module.update_model_speed(name, speed):
+                     printer.print_in_terminal("models_speed_updated", style="green", name=name, speed=speed)
+                 else:
+                     printer.print_in_terminal("models_not_found", style="red", name=name)
+             except ValueError as e:
+                 printer.print_in_terminal("models_invalid_speed", style="red", error=str(e))
+         else:
+             printer.print_in_terminal("models_speed_usage", style="red")
+
      elif subcmd == "/add":
          # Support both simplified and legacy formats
          args = query.strip().split(" ")
          if len(args) == 2:
              # Simplified: /models /add <name> <api_key>
              name, api_key = args[0], args[1]
-             result = models.update_model_with_api_key(name, api_key)
+             result = models_module.update_model_with_api_key(name, api_key)
              if result:
                  printer.print_in_terminal("models_added", style="green", name=name)
              else:
@@ -2275,7 +2329,7 @@ def manage_models(params, query: str):
          }

          models_data.append(final_model)
-         models.save_models(models_data)
+         models_module.save_models(models_data)
          printer.print_in_terminal("models_add_model_success", style="green", name=data_dict["name"])

      elif subcmd == "/remove":
@@ -2288,7 +2342,7 @@ def manage_models(params, query: str):
          if len(filtered_models) == len(models_data):
              printer.print_in_terminal("models_add_model_remove", style="yellow", name=name)
              return
-         models.save_models(filtered_models)
+         models_module.save_models(filtered_models)
          printer.print_in_terminal("models_add_model_removed", style="green", name=name)

      else:
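
Taken together with the /input_price, /output_price and /speed branches above, a /models session in lite mode might look like this (the model name and values are illustrative; per the message strings, prices are per million tokens and speed is seconds per request):

    /models /add deepseek-chat <api_key>
    /models /input_price deepseek-chat 0.27
    /models /output_price deepseek-chat 1.10
    /models /speed deepseek-chat 1.5
    /models /list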
autocoder/chat_auto_coder_lang.py

@@ -85,7 +85,7 @@ MESSAGES = {
        "design_desc": "Generate SVG image based on the provided description",
        "commit_desc": "Auto generate yaml file and commit changes based on user's manual changes",
        "models_desc": "Manage model configurations, only available in lite mode",
-       "models_usage": "Usage: /models /list|/add|/add_model|/remove ...",
+       "models_usage": "Usage: /models /list|/add|/add_model|/remove|/price|/speed ...",
        "models_added": "Added/Updated model '{{name}}' successfully.",
        "models_add_failed": "Failed to add model '{{name}}'. Model not found in defaults.",
        "models_add_usage": "Usage: /models /add <name> <api_key> or\n/models /add <name> <model_type> <model_name> <base_url> <api_key_path> [description]",
@@ -96,6 +96,14 @@ MESSAGES = {
        "models_add_model_remove": "Model '{{name}}' not found.",
        "models_add_model_removed": "Removed model: {{name}}",
        "models_unknown_subcmd": "Unknown subcommand: {{subcmd}}",
+       "models_input_price_updated": "Updated input price for model {{name}} to {{price}} M/token",
+       "models_output_price_updated": "Updated output price for model {{name}} to {{price}} M/token",
+       "models_invalid_price": "Invalid price value: {{error}}",
+       "models_input_price_usage": "Usage: /models /input_price <name> <value>",
+       "models_output_price_usage": "Usage: /models /output_price <name> <value>",
+       "models_speed_updated": "Updated speed for model {{name}} to {{speed}} s/request",
+       "models_invalid_speed": "Invalid speed value: {{error}}",
+       "models_speed_usage": "Usage: /models /speed <name> <value>",
        "models_title": "All Models (内置 + models.json)",
        "models_no_models": "No models found.",
        "models_lite_only": "The /models command is only available in lite mode",
@@ -117,6 +125,7 @@ MESSAGES = {
        "commit_message": "{{ model_name }} Generated commit message: {{ message }}",
        "commit_failed": "{{ model_name }} Failed to generate commit message: {{ error }}",
        "confirm_execute": "Do you want to execute this script?",
+       "official_doc": "Official Documentation: https://uelng8wukz.feishu.cn/wiki/NhPNwSRcWimKFIkQINIckloBncI",
    },
    "zh": {
        "commit_generating": "{{ model_name }} 正在生成提交信息...",
@@ -204,7 +213,7 @@ MESSAGES = {
        "conf_value": "值",
        "conf_title": "配置设置",
        "conf_subtitle": "使用 /conf <key>:<value> 修改这些设置",
-       "models_usage": "用法: /models /list|/add|/add_model|/remove ...",
+       "models_usage": "用法: /models /list|/add|/add_model|/remove|/price|/speed ...",
        "models_added": "成功添加/更新模型 '{{name}}'。",
        "models_add_failed": "添加模型 '{{name}}' 失败。在默认模型中未找到该模型。",
        "models_add_usage": "用法: /models /add <name> <api_key> 或\n/models /add <name> <model_type> <model_name> <base_url> <api_key_path> [description]",
@@ -215,6 +224,14 @@ MESSAGES = {
        "models_add_model_remove": "找不到模型 '{{name}}'。",
        "models_add_model_removed": "已移除模型: {{name}}",
        "models_unknown_subcmd": "未知的子命令: {{subcmd}}",
+       "models_input_price_updated": "已更新模型 {{name}} 的输入价格为 {{price}} M/token",
+       "models_output_price_updated": "已更新模型 {{name}} 的输出价格为 {{price}} M/token",
+       "models_invalid_price": "无效的价格值: {{error}}",
+       "models_input_price_usage": "用法: /models /input_price <name> <value>",
+       "models_output_price_usage": "用法: /models /output_price <name> <value>",
+       "models_speed_updated": "已更新模型 {{name}} 的速度为 {{speed}} 秒/请求",
+       "models_invalid_speed": "无效的速度值: {{error}}",
+       "models_speed_usage": "用法: /models /speed <name> <value>",
        "models_title": "所有模型 (内置 + models.json)",
        "models_no_models": "未找到任何模型。",
        "models_lite_only": "/models 命令仅在 lite 模式下可用",
@@ -232,7 +249,8 @@ MESSAGES = {
        "remove_files_none": "没有文件被移除。",
        "files_removed": "移除的文件",
        "models_api_key_empty": "警告: {{name}} API key 为空。请设置一个有效的 API key。",
-       "confirm_execute": "是否执行此脚本?",
+       "confirm_execute": "是否执行此脚本?",
+       "official_doc": "官方文档: https://uelng8wukz.feishu.cn/wiki/NhPNwSRcWimKFIkQINIckloBncI",
    }
}

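These message tables are rendered through format_str_jinja2 (the import is visible in the chat_auto_coder.py hunk above and in the auto_coder_lang.py hunk below), so every new key is a Jinja2 template whose placeholders must be supplied as keyword arguments. A minimal sketch, assuming the helper takes the template string plus keyword arguments as its use in this codebase suggests; the model name is illustrative:

    from byzerllm.utils import format_str_jinja2

    template = "Updated input price for model {{name}} to {{price}} M/token"
    # Keyword arguments must match the {{...}} placeholders in the template.
    print(format_str_jinja2(template, name="deepseek-chat", price=0.27))
    # Updated input price for model deepseek-chat to 0.27 M/token
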
autocoder/common/__init__.py

@@ -254,6 +254,7 @@ class AutoCoderArgs(pydantic.BaseModel):
      planner_model: Optional[str] = ""
      voice2text_model: Optional[str] = ""
      text2voice_model: Optional[str] = ""
+     commit_model: Optional[str] = ""

      skip_build_index: Optional[bool] = False
      skip_filter_index: Optional[bool] = False
@@ -358,6 +359,7 @@ class AutoCoderArgs(pydantic.BaseModel):
      product_mode: Optional[str] = "lite"

      keep_reasoning_content: Optional[bool] = False
+     keep_only_reasoning_content: Optional[bool] = False

      in_code_apply: bool = False

autocoder/common/auto_coder_lang.py

@@ -3,6 +3,7 @@ from byzerllm.utils import format_str_jinja2

  MESSAGES = {
      "en": {
+         "generation_cancelled": "[Interrupted] Generation cancelled",
          "model_not_found": "Model {{model_name}} not found",
          "generating_shell_script": "Generating Shell Script",
          "new_session_started": "New session started. Previous chat history has been archived.",
@@ -51,7 +52,7 @@ MESSAGES = {
              "Paste the answer to the input box below, use '/break' to exit, '/clear' to clear the screen, '/eof' to submit."
          ),
          "code_generation_start": "Auto generate the code...",
-         "code_generation_complete": "Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}",
+         "code_generation_complete": "Code generation completed in {{ duration }} seconds, input_tokens_count: {{ input_tokens }}, generated_tokens_count: {{ output_tokens }}, speed: {{ speed }} tokens/s",
          "code_merge_start": "Auto merge the code...",
          "code_execution_warning": "Content(send to model) is {{ content_length }} tokens (you may collect too much files), which is larger than the maximum input length {{ max_length }}",
          "quick_filter_start": "{{ model_name }} Starting filter context(quick_filter)...",
@@ -73,7 +74,7 @@ MESSAGES = {
          "ranking_process_failed": "Ranking process failed: {{ error }}",
          "ranking_failed": "Ranking failed in {{ elapsed }}s, using original order",
          "begin_index_source_code": "🚀 Begin to index source code in {{ source_dir }}",
-         "stream_out_stats": "Elapsed time {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
+         "stream_out_stats": "Elapsed time {{ elapsed_time }} seconds, first token time: {{ first_token_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}, speed: {{ speed }} tokens/s",
          "quick_filter_stats": "快速过滤器完成,耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
          "upsert_file": "✅ Updated file: {{ file_path }}",
          "unmerged_blocks_title": "Unmerged Blocks",
@@ -91,6 +92,7 @@ MESSAGES = {
          "estimated_input_tokens_in_generate": "Estimated input tokens in generate ({{ generate_mode }}): {{ estimated_input_tokens }}",
      },
      "zh": {
+         "generation_cancelled": "[已中断] 生成已取消",
          "model_not_found": "未找到模型: {{model_name}}",
          "generating_shell_script": "正在生成 Shell 脚本",
          "new_session_started": "新会话已开始。之前的聊天历史已存档。",
@@ -139,7 +141,7 @@ MESSAGES = {
              "将获得答案黏贴到下面的输入框,换行后,使用 '/break' 退出,'/clear' 清屏,'/eof' 提交。"
          ),
          "code_generation_start": "正在自动生成代码...",
-         "code_generation_complete": "代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
+         "code_generation_complete": "代码生成完成,耗时 {{ duration }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 速度: {{ speed }} tokens/秒",
          "code_merge_start": "正在自动合并代码...",
          "code_execution_warning": "发送给模型的内容长度为 {{ content_length }} tokens(您可能收集了太多文件),超过了最大输入长度 {{ max_length }}",
          "quick_filter_start": "{{ model_name }} 开始查找上下文(quick_filter)...",
@@ -171,7 +173,7 @@ MESSAGES = {
          "ranking_complete": "排序完成,耗时 {{ elapsed }} 秒,总投票数: {{ total_tasks }},最佳候选索引: {{ best_candidate }},得分: {{ scores }},输入token数: {{ input_tokens }},输出token数: {{ output_tokens }}",
          "ranking_process_failed": "排序过程失败: {{ error }}",
          "ranking_failed": "排序失败,耗时 {{ elapsed }} 秒,使用原始顺序",
-         "stream_out_stats": "耗时 {{ elapsed_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}",
+         "stream_out_stats": "总耗时 {{ elapsed_time }} 秒,首token时间: {{ first_token_time }} 秒,输入token数: {{ input_tokens }}, 输出token数: {{ output_tokens }}, 速度: {{ speed }} tokens/秒",
          "quick_filter_stats": "Quick filter completed in {{ elapsed_time }} seconds, input tokens: {{ input_tokens }}, output tokens: {{ output_tokens }}",
          "quick_filter_title": "{{ model_name }} 正在分析如何筛选上下文...",
          "quick_filter_failed": "❌ 快速过滤器失败: {{ error }}. ",
autocoder/common/code_auto_merge.py

@@ -177,5 +177,5 @@ class CodeAutoMerge:

          self.printer.print_in_terminal("files_merged", total=total)
          if not force_skip_git:
-             commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}")
+             commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}")
              git_utils.print_commit_info(commit_result=commit_result)
autocoder/common/code_auto_merge_diff.py

@@ -523,5 +523,5 @@ class CodeAutoMergeDiff:

          self.printer.print_in_terminal("files_merged_total", total=total)
          if not force_skip_git:
-             commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}")
+             commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}")
              git_utils.print_commit_info(commit_result=commit_result)
autocoder/common/code_auto_merge_editblock.py

@@ -403,7 +403,7 @@ class CodeAutoMergeEditBlock:
          if not force_skip_git:
              try:
                  commit_result = git_utils.commit_changes(
-                     self.args.source_dir, f"auto_coder_{file_name}_{md5}"
+                     self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}"
                  )
                  git_utils.print_commit_info(commit_result=commit_result)
              except Exception as e:
autocoder/common/code_auto_merge_strict_diff.py

@@ -233,7 +233,7 @@ class CodeAutoMergeStrictDiff:

          self.printer.print_in_terminal("files_merged_total", total=total)
          if not force_skip_git:
-             commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}")
+             commit_result = git_utils.commit_changes(self.args.source_dir, f"auto_coder_{file_name}_{md5}\n{self.args.query}")
              git_utils.print_commit_info(commit_result=commit_result)

      @byzerllm.prompt(render="jinja2")
autocoder/common/code_modification_ranker.py

@@ -8,6 +8,8 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
  import traceback
  from autocoder.common.utils_code_auto_generate import chat_with_continue
  from byzerllm.utils.str2model import to_model
+
+ from autocoder.utils.llms import get_llm_names
  class RankResult(BaseModel):
      rank_result: List[int]

@@ -78,9 +80,7 @@ class CodeModificationRanker:
          # Submit tasks for each model and generate_times
          futures = []
          for llm in self.llms:
-             model_name = getattr(llm, 'default_model_name', None)
-             if not model_name:
-                 model_name = "unknown(without default model name)"
+             model_name = ",".join(get_llm_names(llm))
              self.printer.print_in_terminal(
                  "ranking_start", style="blue", count=len(generate_result.contents), model_name=model_name)

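get_llm_names itself lives in autocoder/utils/llms.py (+15 -1 in this release), whose body is not included in the extracted hunks. Purely as a hypothetical sketch of a shape that would fit the call sites, which join the result with commas and no longer need the "unknown(without default model name)" fallback:

    # Hypothetical sketch only; the real implementation is not shown in this diff.
    def get_llm_names(llm):
        names = []
        for attr in ("default_model_name", "model_name"):
            name = getattr(llm, attr, None)
            if name:
                names.append(name)
        return names or ["unknown"]
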
autocoder/common/global_cancel.py (new file)

@@ -0,0 +1,21 @@
+ import threading
+
+ class GlobalCancel:
+     def __init__(self):
+         self._flag = False
+         self._lock = threading.Lock()
+
+     @property
+     def requested(self):
+         with self._lock:
+             return self._flag
+
+     def set(self):
+         with self._lock:
+             self._flag = True
+
+     def reset(self):
+         with self._lock:
+             self._flag = False
+
+ global_cancel = GlobalCancel()
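
global_cancel is a process-wide, lock-guarded flag. Its consumers are not part of the extracted hunks, but the new "generation_cancelled" message key added in auto_coder_lang.py suggests streaming loops poll it; a hypothetical consumer might look like:

    from autocoder.common.global_cancel import global_cancel

    def stream_tokens(chunks):
        # Hypothetical: stop yielding as soon as cancellation is requested,
        # then clear the flag so the next generation starts clean.
        for chunk in chunks:
            if global_cancel.requested:
                break
            yield chunk
        global_cancel.reset()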
autocoder/common/mcp_server.py

@@ -3,7 +3,6 @@ from asyncio import Queue as AsyncQueue
  import threading
  from typing import List, Dict, Any, Optional
  from dataclasses import dataclass
- import byzerllm
  from autocoder.common.mcp_hub import McpHub
  from autocoder.common.mcp_tools import McpExecutor
  from autocoder.common.mcp_hub import MCP_BUILD_IN_SERVERS
@@ -13,12 +12,13 @@ import time
  from pydantic import BaseModel
  import sys
  from loguru import logger
+ from autocoder.utils.llms import get_single_llm

  @dataclass
  class McpRequest:
      query: str
      model: Optional[str] = None
-
+     product_mode: Optional[str] = None

  @dataclass
  class McpInstallRequest:
@@ -279,8 +279,7 @@ class McpServer:
                  await self._response_queue.put(McpResponse(result="", error=f"Failed to refresh MCP servers: {str(e)}"))

              else:
-                 llm = byzerllm.ByzerLLM.from_default_model(
-                     model=request.model)
+                 llm = get_single_llm(request.model,product_mode=request.product_mode)
                  mcp_executor = McpExecutor(hub, llm)
                  conversations = [
                      {"role": "user", "content": request.query}]
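
Because the server now builds its LLM per request via get_single_llm instead of byzerllm.ByzerLLM.from_default_model, the caller has to ship product_mode along with the query, which is exactly what the mcp() hunk in chat_auto_coder.py above adds. Schematically, on the client side (model name illustrative):

    from autocoder.common.mcp_server import get_mcp_server, McpRequest

    mcp_server = get_mcp_server()
    response = mcp_server.send_request(
        McpRequest(query="...", model="deepseek-chat", product_mode="lite")
    )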
autocoder/common/mcp_servers/mcp_server_perplexity.py

@@ -55,7 +55,7 @@ async def handle_list_tools() -> list[types.Tool]:
                  # Commenting out larger models,which have higher risks of timing out,
                  # until Claude Desktop can handle long-running tasks effectively.
                  # "llama-3.1-sonar-large-128k-online",
-                 "llama-3.1-sonar-huge-128k-online",
+                 "sonar-reasoning-pro",
              ],
          },
          "messages": {
autocoder/dispacher/actions/action.py

@@ -26,6 +26,7 @@ from autocoder.utils.conversation_store import store_code_model_conversation
  from loguru import logger
  import time
  from autocoder.common.printer import Printer
+ from autocoder.utils.llms import get_llm_names


  class BaseAction:
@@ -123,11 +124,16 @@ class ActionTSProject(BaseAction):
          generate_result = generate.single_round_run(
              query=args.query, source_content=content
          )
+         elapsed_time = time.time() - start_time
+         speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
+         model_names = ",".join(get_llm_names(self.llm))
          self.printer.print_in_terminal(
              "code_generation_complete",
-             duration=time.time() - start_time,
+             duration=elapsed_time,
              input_tokens=generate_result.metadata.get('input_tokens_count', 0),
-             output_tokens=generate_result.metadata.get('generated_tokens_count', 0)
+             output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+             speed=round(speed, 2),
+             model_names=model_names
          )
          merge_result = None
          if args.execute and args.auto_merge:
@@ -213,11 +219,16 @@ class ActionPyScriptProject(BaseAction):
              query=args.query, source_content=content
          )

+         elapsed_time = time.time() - start_time
+         speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
+         model_names = ",".join(get_llm_names(self.llm))
          self.printer.print_in_terminal(
              "code_generation_complete",
-             duration=time.time() - start_time,
+             duration=elapsed_time,
              input_tokens=generate_result.metadata.get('input_tokens_count', 0),
-             output_tokens=generate_result.metadata.get('generated_tokens_count', 0)
+             output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+             speed=round(speed, 2),
+             model_names=model_names
          )
          merge_result = None
          if args.execute and args.auto_merge:
@@ -335,11 +346,16 @@ class ActionPyProject(BaseAction):
          generate_result = generate.single_round_run(
              query=args.query, source_content=content
          )
+         elapsed_time = time.time() - start_time
+         speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
+         model_names = ",".join(get_llm_names(self.llm))
          self.printer.print_in_terminal(
              "code_generation_complete",
-             duration=time.time() - start_time,
+             duration=elapsed_time,
              input_tokens=generate_result.metadata.get('input_tokens_count', 0),
-             output_tokens=generate_result.metadata.get('generated_tokens_count', 0)
+             output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+             speed=round(speed, 2),
+             model_names=model_names
          )
          merge_result = None
          if args.execute and args.auto_merge:
@@ -440,11 +456,16 @@ class ActionSuffixProject(BaseAction):
              query=args.query, source_content=content
          )

+         elapsed_time = time.time() - start_time
+         speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
+         model_names = ",".join(get_llm_names(self.llm))
          self.printer.print_in_terminal(
              "code_generation_complete",
-             duration=time.time() - start_time,
+             duration=elapsed_time,
              input_tokens=generate_result.metadata.get('input_tokens_count', 0),
-             output_tokens=generate_result.metadata.get('generated_tokens_count', 0)
+             output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+             speed=round(speed, 2),
+             model_names=model_names
          )
          merge_result = None
          if args.execute and args.auto_merge:
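
The speed figure added in all four actions is plain throughput: generated tokens divided by wall-clock seconds, with a guard against a zero denominator. For example:

    elapsed_time = 70.0        # seconds between start_time and completion
    generated_tokens = 3500
    speed = generated_tokens / elapsed_time if elapsed_time > 0 else 0
    print(round(speed, 2))     # 50.0 tokens/s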
autocoder/dispacher/actions/plugins/action_regex_project.py

@@ -12,9 +12,10 @@ from autocoder.common.code_auto_generate_editblock import CodeAutoGenerateEditBl
  from autocoder.index.entry import build_index_and_filter_files
  from autocoder.regexproject import RegexProject
  from autocoder.utils.conversation_store import store_code_model_conversation
- from loguru import logger
+ from autocoder.common.printer import Printer
  import time
-
+ from autocoder.utils.llms import get_llm_names
+ from loguru import logger
  class ActionRegexProject:
      def __init__(
          self, args: AutoCoderArgs, llm: Optional[byzerllm.ByzerLLM] = None
@@ -22,6 +23,7 @@ class ActionRegexProject:
          self.args = args
          self.llm = llm
          self.pp = None
+         self.printer = Printer()

      def run(self):
          args = self.args
@@ -58,7 +60,7 @@ class ActionRegexProject:

          start_time = time.time()
          if args.execute:
-             logger.info("Auto generate the code...")
+             self.printer.print_in_terminal("code_generation_start")

              if args.auto_merge == "diff":
                  generate = CodeAutoGenerateDiff(
@@ -83,10 +85,20 @@ class ActionRegexProject:
                  query=args.query, source_content=content
              )

-             logger.info(f"Code generation completed in {time.time() - start_time:.2f} seconds, input_tokens_count: {generate_result.metadata.get('input_tokens_count', 0)}, generated_tokens_count: {generate_result.metadata.get('generated_tokens_count', 0)}")
+             elapsed_time = time.time() - start_time
+             speed = generate_result.metadata.get('generated_tokens_count', 0) / elapsed_time if elapsed_time > 0 else 0
+             model_names = ",".join(get_llm_names(self.llm))
+             self.printer.print_in_terminal(
+                 "code_generation_complete",
+                 duration=elapsed_time,
+                 input_tokens=generate_result.metadata.get('input_tokens_count', 0),
+                 output_tokens=generate_result.metadata.get('generated_tokens_count', 0),
+                 speed=round(speed, 2),
+                 model_names=model_names
+             )
          merge_result = None
          if args.execute and args.auto_merge:
-             logger.info("Auto merge the code...")
+             self.printer.print_in_terminal("code_merge_start")
              if args.auto_merge == "diff":
                  code_merge = CodeAutoMergeDiff(llm=self.llm, args=self.args)
                  merge_result = code_merge.merge_code(generate_result=generate_result)
autocoder/index/filter/quick_filter.py

@@ -17,6 +17,8 @@ from autocoder.common.printer import Printer
  from concurrent.futures import ThreadPoolExecutor
  import threading

+ from autocoder.utils.llms import get_llm_names
+

  def get_file_path(file_path):
      if file_path.startswith("##"):
@@ -70,9 +72,7 @@ class QuickFilter():

          def process_chunk(chunk_index: int, chunk: List[IndexItem]) -> None:
              try:
-                 model_name = getattr(self.index_manager.index_filter_llm, 'default_model_name', None)
-                 if not model_name:
-                     model_name = "unknown(without default model name)"
+                 model_name = ",".join(get_llm_names(self.index_manager.index_filter_llm))

                  if chunk_index == 0:
                      # 第一个chunk使用流式输出
@@ -180,9 +180,7 @@ class QuickFilter():
              return self.big_filter(index_items)

          try:
-             model_name = getattr(self.index_manager.index_filter_llm, 'default_model_name', None)
-             if not model_name:
-                 model_name = "unknown(without default model name)"
+             model_name = ",".join(get_llm_names(self.index_manager.index_filter_llm))

              # 渲染 Prompt 模板
              query = self.quick_filter_files.prompt(index_items, self.args.query)