auto-coder 0.1.334__py3-none-any.whl → 0.1.340__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of auto-coder has been flagged as potentially problematic.
- {auto_coder-0.1.334.dist-info → auto_coder-0.1.340.dist-info}/METADATA +2 -2
- {auto_coder-0.1.334.dist-info → auto_coder-0.1.340.dist-info}/RECORD +70 -34
- autocoder/agent/agentic_edit.py +833 -0
- autocoder/agent/agentic_edit_tools/__init__.py +28 -0
- autocoder/agent/agentic_edit_tools/ask_followup_question_tool_resolver.py +32 -0
- autocoder/agent/agentic_edit_tools/attempt_completion_tool_resolver.py +29 -0
- autocoder/agent/agentic_edit_tools/base_tool_resolver.py +29 -0
- autocoder/agent/agentic_edit_tools/execute_command_tool_resolver.py +84 -0
- autocoder/agent/agentic_edit_tools/list_code_definition_names_tool_resolver.py +75 -0
- autocoder/agent/agentic_edit_tools/list_files_tool_resolver.py +62 -0
- autocoder/agent/agentic_edit_tools/plan_mode_respond_tool_resolver.py +30 -0
- autocoder/agent/agentic_edit_tools/read_file_tool_resolver.py +36 -0
- autocoder/agent/agentic_edit_tools/replace_in_file_tool_resolver.py +95 -0
- autocoder/agent/agentic_edit_tools/search_files_tool_resolver.py +70 -0
- autocoder/agent/agentic_edit_tools/use_mcp_tool_resolver.py +55 -0
- autocoder/agent/agentic_edit_tools/write_to_file_tool_resolver.py +98 -0
- autocoder/agent/agentic_edit_types.py +124 -0
- autocoder/agent/agentic_filter.py +14 -7
- autocoder/auto_coder.py +39 -18
- autocoder/auto_coder_rag.py +18 -9
- autocoder/auto_coder_runner.py +107 -8
- autocoder/chat_auto_coder.py +1 -2
- autocoder/chat_auto_coder_lang.py +18 -2
- autocoder/commands/tools.py +5 -1
- autocoder/common/__init__.py +2 -0
- autocoder/common/auto_coder_lang.py +84 -8
- autocoder/common/code_auto_generate_diff.py +1 -1
- autocoder/common/code_auto_generate_editblock.py +1 -1
- autocoder/common/code_auto_generate_strict_diff.py +1 -1
- autocoder/common/mcp_hub.py +185 -2
- autocoder/common/mcp_server.py +243 -306
- autocoder/common/mcp_server_install.py +269 -0
- autocoder/common/mcp_server_types.py +169 -0
- autocoder/common/stream_out_type.py +3 -0
- autocoder/common/v2/agent/__init__.py +0 -0
- autocoder/common/v2/agent/agentic_edit.py +1433 -0
- autocoder/common/v2/agent/agentic_edit_conversation.py +179 -0
- autocoder/common/v2/agent/agentic_edit_tools/__init__.py +28 -0
- autocoder/common/v2/agent/agentic_edit_tools/ask_followup_question_tool_resolver.py +70 -0
- autocoder/common/v2/agent/agentic_edit_tools/attempt_completion_tool_resolver.py +35 -0
- autocoder/common/v2/agent/agentic_edit_tools/base_tool_resolver.py +33 -0
- autocoder/common/v2/agent/agentic_edit_tools/execute_command_tool_resolver.py +88 -0
- autocoder/common/v2/agent/agentic_edit_tools/list_code_definition_names_tool_resolver.py +80 -0
- autocoder/common/v2/agent/agentic_edit_tools/list_files_tool_resolver.py +105 -0
- autocoder/common/v2/agent/agentic_edit_tools/plan_mode_respond_tool_resolver.py +35 -0
- autocoder/common/v2/agent/agentic_edit_tools/read_file_tool_resolver.py +51 -0
- autocoder/common/v2/agent/agentic_edit_tools/replace_in_file_tool_resolver.py +153 -0
- autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py +104 -0
- autocoder/common/v2/agent/agentic_edit_tools/use_mcp_tool_resolver.py +46 -0
- autocoder/common/v2/agent/agentic_edit_tools/write_to_file_tool_resolver.py +58 -0
- autocoder/common/v2/agent/agentic_edit_types.py +167 -0
- autocoder/common/v2/agent/agentic_tool_display.py +184 -0
- autocoder/common/v2/code_agentic_editblock_manager.py +812 -0
- autocoder/common/v2/code_auto_generate.py +1 -1
- autocoder/common/v2/code_auto_generate_diff.py +1 -1
- autocoder/common/v2/code_auto_generate_editblock.py +1 -1
- autocoder/common/v2/code_auto_generate_strict_diff.py +1 -1
- autocoder/common/v2/code_editblock_manager.py +151 -178
- autocoder/compilers/provided_compiler.py +3 -2
- autocoder/events/event_manager.py +4 -4
- autocoder/events/event_types.py +1 -0
- autocoder/memory/active_context_manager.py +2 -29
- autocoder/models.py +10 -2
- autocoder/shadows/shadow_manager.py +1 -1
- autocoder/utils/llms.py +4 -2
- autocoder/version.py +1 -1
- {auto_coder-0.1.334.dist-info → auto_coder-0.1.340.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.334.dist-info → auto_coder-0.1.340.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.334.dist-info → auto_coder-0.1.340.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.334.dist-info → auto_coder-0.1.340.dist-info}/top_level.txt +0 -0
autocoder/agent/agentic_edit_tools/write_to_file_tool_resolver.py
ADDED
@@ -0,0 +1,98 @@
+import os
+import re
+from typing import Dict, Any, Optional, List, Tuple
+from .base_tool_resolver import BaseToolResolver
+from autocoder.agent.agentic_edit_types import WriteToFileTool, ToolResult  # Import ToolResult from types
+from loguru import logger
+
+
+class WriteToFileToolResolver(BaseToolResolver):
+    def __init__(self, agent: Optional[Any], tool: WriteToFileTool, args: Dict[str, Any]):
+        super().__init__(agent, tool, args)
+        self.tool: WriteToFileTool = tool  # For type hinting
+
+    def parse_diff(self, diff_content: str) -> List[Tuple[str, str]]:
+        """
+        Parses the diff content into a list of (search_block, replace_block) tuples.
+        """
+        blocks = []
+        # Regex to find SEARCH/REPLACE blocks, handling potential variations in line endings
+        pattern = re.compile(r"<<<<<<< SEARCH\r?\n(.*?)\r?\n=======\r?\n(.*?)\r?\n>>>>>>> REPLACE", re.DOTALL)
+        matches = pattern.findall(diff_content)
+        for search_block, replace_block in matches:
+            blocks.append((search_block, replace_block))
+        if not matches and diff_content.strip():
+            logger.warning(f"Could not parse any SEARCH/REPLACE blocks from diff: {diff_content}")
+        return blocks
+
+    def resolve(self) -> ToolResult:
+        file_path = self.tool.path
+        content = self.tool.content
+        source_dir = self.args.source_dir or "."
+        absolute_path = os.path.abspath(os.path.join(source_dir, file_path))
+
+        # Security check: ensure the path is within the source directory
+        if not absolute_path.startswith(os.path.abspath(source_dir)):
+            return ToolResult(success=False, message=f"Error: Access denied. Attempted to write file outside the project directory: {file_path}")
+
+        try:
+            # Create directories if they don't exist
+            os.makedirs(os.path.dirname(absolute_path), exist_ok=True)
+
+            # Check if the content contains SEARCH/REPLACE blocks
+            parsed_blocks = self.parse_diff(content)
+            if parsed_blocks:
+                # If file exists, read its current content
+                if os.path.exists(absolute_path):
+                    try:
+                        with open(absolute_path, 'r', encoding='utf-8', errors='replace') as f:
+                            original_content = f.read()
+                    except Exception as e:
+                        logger.error(f"Error reading existing file '{file_path}' for diff apply: {str(e)}")
+                        return ToolResult(success=False, message=f"An error occurred while reading the existing file: {str(e)}")
+                else:
+                    # If file does not exist, start with empty content
+                    original_content = ""
+
+                current_content = original_content
+                applied_count = 0
+                errors = []
+
+                for i, (search_block, replace_block) in enumerate(parsed_blocks):
+                    start_index = current_content.find(search_block)
+                    if start_index != -1:
+                        current_content = (
+                            current_content[:start_index]
+                            + replace_block
+                            + current_content[start_index + len(search_block):]
+                        )
+                        applied_count += 1
+                        logger.info(f"Applied SEARCH/REPLACE block {i+1} in file {file_path}")
+                    else:
+                        error_message = f"SEARCH block {i+1} not found in current content. Search block:\n---\n{search_block}\n---"
+                        logger.warning(error_message)
+                        errors.append(error_message)
+                        # Continue with next block
+
+                try:
+                    with open(absolute_path, 'w', encoding='utf-8') as f:
+                        f.write(current_content)
+                    message = f"Successfully applied {applied_count}/{len(parsed_blocks)} changes to file: {file_path}."
+                    if errors:
+                        message += "\nWarnings:\n" + "\n".join(errors)
+                    logger.info(message)
+                    return ToolResult(success=True, message=message, content=current_content)
+                except Exception as e:
+                    logger.error(f"Error writing replaced content to file '{file_path}': {str(e)}")
+                    return ToolResult(success=False, message=f"An error occurred while writing the modified file: {str(e)}")
+            else:
+                # No diff blocks detected, treat as full content overwrite
+                with open(absolute_path, 'w', encoding='utf-8') as f:
+                    f.write(content)
+
+                logger.info(f"Successfully wrote to file: {file_path}")
+                return ToolResult(success=True, message=f"Successfully wrote to file: {file_path}", content=content)
+
+        except Exception as e:
+            logger.error(f"Error writing to file '{file_path}': {str(e)}")
+            return ToolResult(success=False, message=f"An error occurred while writing to the file: {str(e)}")
autocoder/agent/agentic_edit_types.py
ADDED
@@ -0,0 +1,124 @@
+from pydantic import BaseModel
+from typing import List, Dict, Any, Callable, Optional, Type
+from pydantic import SkipValidation
+
+
+# Result class used by Tool Resolvers
+class ToolResult(BaseModel):
+    success: bool
+    message: str
+    content: Any = None  # Can store file content, command output, etc.
+
+# Pydantic Models for Tools
+class BaseTool(BaseModel):
+    pass
+
+class ExecuteCommandTool(BaseTool):
+    command: str
+    requires_approval: bool
+
+class ReadFileTool(BaseTool):
+    path: str
+
+class WriteToFileTool(BaseTool):
+    path: str
+    content: str
+
+class ReplaceInFileTool(BaseTool):
+    path: str
+    diff: str
+
+class SearchFilesTool(BaseTool):
+    path: str
+    regex: str
+    file_pattern: Optional[str] = None
+
+class ListFilesTool(BaseTool):
+    path: str
+    recursive: Optional[bool] = False
+
+class ListCodeDefinitionNamesTool(BaseTool):
+    path: str
+
+class AskFollowupQuestionTool(BaseTool):
+    question: str
+    options: Optional[List[str]] = None
+
+class AttemptCompletionTool(BaseTool):
+    result: str
+    command: Optional[str] = None
+
+class PlanModeRespondTool(BaseTool):
+    response: str
+    options: Optional[List[str]] = None
+
+class UseMcpTool(BaseTool):
+    server_name: str
+    tool_name: str
+    arguments: Dict[str, Any]
+
+class PlainTextOutput(BaseModel):
+    text: str
+
+
+# Mapping from tool tag names to Pydantic models
+TOOL_MODEL_MAP: Dict[str, Type[BaseTool]] = {
+    "execute_command": ExecuteCommandTool,
+    "read_file": ReadFileTool,
+    "write_to_file": WriteToFileTool,
+    "replace_in_file": ReplaceInFileTool,
+    "search_files": SearchFilesTool,
+    "list_files": ListFilesTool,
+    "list_code_definition_names": ListCodeDefinitionNamesTool,
+    "ask_followup_question": AskFollowupQuestionTool,
+    "attempt_completion": AttemptCompletionTool,
+    "plan_mode_respond": PlanModeRespondTool,
+    "use_mcp_tool": UseMcpTool,
+}
+
+
+class AgenticEditRequest(BaseModel):
+    user_input: str
+
+
+class FileOperation(BaseModel):
+    path: str
+    operation: str  # e.g., "MODIFY", "REFERENCE", "ADD", "REMOVE"
+class MemoryConfig(BaseModel):
+    """
+    A model to encapsulate memory configuration and operations.
+    """
+
+    memory: Dict[str, Any]
+    save_memory_func: SkipValidation[Callable]
+
+    class Config:
+        arbitrary_types_allowed = True
+
+
+class CommandConfig(BaseModel):
+    coding: SkipValidation[Callable]
+    chat: SkipValidation[Callable]
+    add_files: SkipValidation[Callable]
+    remove_files: SkipValidation[Callable]
+    index_build: SkipValidation[Callable]
+    index_query: SkipValidation[Callable]
+    list_files: SkipValidation[Callable]
+    ask: SkipValidation[Callable]
+    revert: SkipValidation[Callable]
+    commit: SkipValidation[Callable]
+    help: SkipValidation[Callable]
+    exclude_dirs: SkipValidation[Callable]
+    summon: SkipValidation[Callable]
+    design: SkipValidation[Callable]
+    mcp: SkipValidation[Callable]
+    models: SkipValidation[Callable]
+    lib: SkipValidation[Callable]
+    execute_shell_command: SkipValidation[Callable]
+    generate_shell_command: SkipValidation[Callable]
+    conf_export: SkipValidation[Callable]
+    conf_import: SkipValidation[Callable]
+    index_export: SkipValidation[Callable]
+    index_import: SkipValidation[Callable]
+    exclude_files: SkipValidation[Callable]
+
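TOOL_MODEL_MAP above is what lets the agent turn a parsed tool tag plus its field values into a typed Pydantic object. A minimal sketch of that lookup, assuming auto-coder 0.1.340 is installed; the tag name and field values are invented for illustration.

# Minimal sketch: resolving a parsed tool tag into its Pydantic model via TOOL_MODEL_MAP.
# The tag name and field values below are illustrative only.
from autocoder.agent.agentic_edit_types import TOOL_MODEL_MAP

tag_name = "read_file"
tag_fields = {"path": "src/example.py"}

tool_cls = TOOL_MODEL_MAP[tag_name]    # -> ReadFileTool
tool = tool_cls(**tag_fields)          # pydantic validates the fields
print(type(tool).__name__, tool.path)  # ReadFileTool src/example.py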
autocoder/agent/agentic_filter.py
CHANGED
@@ -259,8 +259,8 @@ class AgenticFilter:
 3. **深入分析**:
    * 使用 `read_files` 读取关键文件的内容进行确认。如果文件过大,使用 `line_ranges` 参数分段读取。
    * 如有必要,使用 `run_python` 或 `execute_shell_command` 执行代码或命令进行更复杂的分析。
-4. **迭代决策**: 根据工具的返回结果,你可能需要多次调用不同的工具来逐步缩小范围或获取更多信息。
-
+4. **迭代决策**: 根据工具的返回结果,你可能需要多次调用不同的工具来逐步缩小范围或获取更多信息。
+6. **最终响应**: 当你确定了所有需要参考和修改的文件后,**必须**调用 `output_result` 工具,并提供符合其要求格式的JSON字符串作为其 `response` 参数。
 该json格式要求为:
 ```json
 {
@@ -273,9 +273,17 @@ class AgenticFilter:
     "reasoning": "详细说明你是如何通过分析和使用工具得出这个文件列表的。"
 }
 ```
-
-
-
+
+{% if enable_active_context %}
+** 非常非常重要的提示 **
+每一个目录都有一个描述信息,比如 {{ project_path }}/src/abc/bbc 的目录描述信息会放在 {{ project_path }}/.auto-coder/active-context/src/abc/bbc/active.md 文件中。
+你可以使用 read_files 函数读取,从而帮助你更好的挑选要详细阅读哪个文件。值得注意的是,active.md 并不会包含该目录下所有的文件信息,只保存最近发生变更的文件的信息。
+{% endif %}
+        """
+        return {
+            "project_path": os.path.abspath(self.args.source_dir),
+            "enable_active_context": self.args.enable_active_context,
+        }

     @byzerllm.prompt()
     def _execute_command_result(self, result: str) -> str:
@@ -590,8 +598,7 @@ class AgenticFilter:
 - 如果未找到匹配项,会返回提示信息

 </usage>
-</command>
-
+</command>
 <command>
 <n>execute_mcp_server</n>
 <description>执行MCP服务器</description>
autocoder/auto_coder.py
CHANGED
@@ -52,6 +52,11 @@ from autocoder.privacy.model_filter import ModelPathFilter
 from autocoder.common.result_manager import ResultManager
 from autocoder.events.event_manager_singleton import get_event_manager
 from autocoder.events import event_content as EventContentCreator
+from autocoder.common.mcp_server import get_mcp_server
+from autocoder.common.mcp_server_types import (
+    McpRequest, McpInstallRequest, McpRemoveRequest, McpListRequest,
+    McpListRunningRequest, McpRefreshRequest
+)

 console = Console()

@@ -261,7 +266,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )

@@ -284,7 +290,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 models.append(code_model)
@@ -302,7 +309,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("code_model", code_model)
@@ -323,7 +331,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 models.append(rerank_model)
@@ -341,7 +350,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("generate_rerank_model", rerank_model)
@@ -358,7 +368,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("inference_model", inference_model)
@@ -375,7 +386,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("index_filter_model", index_filter_model)
@@ -541,7 +553,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("chat_model", chat_model)
@@ -558,7 +571,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("vl_model", vl_model)
@@ -575,7 +589,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("index_model", index_model)
@@ -592,7 +607,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("sd_model", sd_model)
@@ -609,7 +625,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("text2voice_model", text2voice_model)
@@ -626,7 +643,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("voice2text_model", voice2text_model)
@@ -643,7 +661,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("planner_model", planner_model)
@@ -660,7 +679,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("commit_model", commit_model)
@@ -677,7 +697,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("designer_model", designer_model)
@@ -694,7 +715,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("emb_model", emb_model)
@@ -1254,7 +1276,6 @@ def main(input_args: Optional[List[str]] = None):
         v = (item for item in response)

     elif "mcp" in commands_info:
-        from autocoder.common.mcp_server import get_mcp_server, McpRequest, McpInstallRequest, McpRemoveRequest, McpListRequest, McpListRunningRequest, McpRefreshRequest
         mcp_server = get_mcp_server()

         pos_args = commands_info["mcp"].get("args", [])
autocoder/auto_coder_rag.py
CHANGED
@@ -636,7 +636,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )

@@ -652,7 +653,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("recall_model", recall_model)
@@ -668,7 +670,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("chunk_model", chunk_model)
@@ -684,7 +687,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("qa_model", qa_model)
@@ -700,7 +704,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": False
+        "saas.is_reasoning": False,
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("emb_model", emb_model)
@@ -792,7 +797,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )

@@ -806,7 +812,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": False
+        "saas.is_reasoning": False,
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
 llm.setup_sub_client("emb_model", emb_model)
@@ -852,7 +859,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )

@@ -882,7 +890,8 @@ def main(input_args: Optional[List[str]] = None):
         "saas.base_url": model_info["base_url"],
         "saas.api_key": model_info["api_key"],
         "saas.model": model_info["model_name"],
-        "saas.is_reasoning": model_info["is_reasoning"]
+        "saas.is_reasoning": model_info["is_reasoning"],
+        "saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
     }
 )
