auto-coder 0.1.334__py3-none-any.whl → 0.1.336__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of auto-coder might be problematic.

Files changed (67)
  1. {auto_coder-0.1.334.dist-info → auto_coder-0.1.336.dist-info}/METADATA +2 -2
  2. {auto_coder-0.1.334.dist-info → auto_coder-0.1.336.dist-info}/RECORD +67 -32
  3. autocoder/agent/agentic_edit.py +833 -0
  4. autocoder/agent/agentic_edit_tools/__init__.py +28 -0
  5. autocoder/agent/agentic_edit_tools/ask_followup_question_tool_resolver.py +32 -0
  6. autocoder/agent/agentic_edit_tools/attempt_completion_tool_resolver.py +29 -0
  7. autocoder/agent/agentic_edit_tools/base_tool_resolver.py +29 -0
  8. autocoder/agent/agentic_edit_tools/execute_command_tool_resolver.py +84 -0
  9. autocoder/agent/agentic_edit_tools/list_code_definition_names_tool_resolver.py +75 -0
  10. autocoder/agent/agentic_edit_tools/list_files_tool_resolver.py +62 -0
  11. autocoder/agent/agentic_edit_tools/plan_mode_respond_tool_resolver.py +30 -0
  12. autocoder/agent/agentic_edit_tools/read_file_tool_resolver.py +36 -0
  13. autocoder/agent/agentic_edit_tools/replace_in_file_tool_resolver.py +95 -0
  14. autocoder/agent/agentic_edit_tools/search_files_tool_resolver.py +70 -0
  15. autocoder/agent/agentic_edit_tools/use_mcp_tool_resolver.py +55 -0
  16. autocoder/agent/agentic_edit_tools/write_to_file_tool_resolver.py +98 -0
  17. autocoder/agent/agentic_edit_types.py +124 -0
  18. autocoder/auto_coder.py +39 -18
  19. autocoder/auto_coder_rag.py +18 -9
  20. autocoder/auto_coder_runner.py +50 -5
  21. autocoder/chat_auto_coder_lang.py +18 -2
  22. autocoder/commands/tools.py +5 -1
  23. autocoder/common/__init__.py +2 -0
  24. autocoder/common/auto_coder_lang.py +40 -8
  25. autocoder/common/code_auto_generate_diff.py +1 -1
  26. autocoder/common/code_auto_generate_editblock.py +1 -1
  27. autocoder/common/code_auto_generate_strict_diff.py +1 -1
  28. autocoder/common/mcp_hub.py +185 -2
  29. autocoder/common/mcp_server.py +243 -306
  30. autocoder/common/mcp_server_install.py +269 -0
  31. autocoder/common/mcp_server_types.py +169 -0
  32. autocoder/common/stream_out_type.py +3 -0
  33. autocoder/common/v2/agent/__init__.py +0 -0
  34. autocoder/common/v2/agent/agentic_edit.py +1302 -0
  35. autocoder/common/v2/agent/agentic_edit_tools/__init__.py +28 -0
  36. autocoder/common/v2/agent/agentic_edit_tools/ask_followup_question_tool_resolver.py +70 -0
  37. autocoder/common/v2/agent/agentic_edit_tools/attempt_completion_tool_resolver.py +35 -0
  38. autocoder/common/v2/agent/agentic_edit_tools/base_tool_resolver.py +33 -0
  39. autocoder/common/v2/agent/agentic_edit_tools/execute_command_tool_resolver.py +88 -0
  40. autocoder/common/v2/agent/agentic_edit_tools/list_code_definition_names_tool_resolver.py +80 -0
  41. autocoder/common/v2/agent/agentic_edit_tools/list_files_tool_resolver.py +105 -0
  42. autocoder/common/v2/agent/agentic_edit_tools/plan_mode_respond_tool_resolver.py +35 -0
  43. autocoder/common/v2/agent/agentic_edit_tools/read_file_tool_resolver.py +51 -0
  44. autocoder/common/v2/agent/agentic_edit_tools/replace_in_file_tool_resolver.py +144 -0
  45. autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py +99 -0
  46. autocoder/common/v2/agent/agentic_edit_tools/use_mcp_tool_resolver.py +46 -0
  47. autocoder/common/v2/agent/agentic_edit_tools/write_to_file_tool_resolver.py +58 -0
  48. autocoder/common/v2/agent/agentic_edit_types.py +162 -0
  49. autocoder/common/v2/agent/agentic_tool_display.py +184 -0
  50. autocoder/common/v2/code_agentic_editblock_manager.py +812 -0
  51. autocoder/common/v2/code_auto_generate.py +1 -1
  52. autocoder/common/v2/code_auto_generate_diff.py +1 -1
  53. autocoder/common/v2/code_auto_generate_editblock.py +1 -1
  54. autocoder/common/v2/code_auto_generate_strict_diff.py +1 -1
  55. autocoder/common/v2/code_editblock_manager.py +151 -178
  56. autocoder/compilers/provided_compiler.py +3 -2
  57. autocoder/events/event_manager.py +4 -4
  58. autocoder/events/event_types.py +1 -0
  59. autocoder/memory/active_context_manager.py +2 -29
  60. autocoder/models.py +10 -2
  61. autocoder/shadows/shadow_manager.py +1 -1
  62. autocoder/utils/llms.py +4 -2
  63. autocoder/version.py +1 -1
  64. {auto_coder-0.1.334.dist-info → auto_coder-0.1.336.dist-info}/LICENSE +0 -0
  65. {auto_coder-0.1.334.dist-info → auto_coder-0.1.336.dist-info}/WHEEL +0 -0
  66. {auto_coder-0.1.334.dist-info → auto_coder-0.1.336.dist-info}/entry_points.txt +0 -0
  67. {auto_coder-0.1.334.dist-info → auto_coder-0.1.336.dist-info}/top_level.txt +0 -0
autocoder/agent/agentic_edit_tools/write_to_file_tool_resolver.py ADDED
@@ -0,0 +1,98 @@
+import os
+import re
+from typing import Dict, Any, Optional, List, Tuple
+from .base_tool_resolver import BaseToolResolver
+from autocoder.agent.agentic_edit_types import WriteToFileTool, ToolResult # Import ToolResult from types
+from loguru import logger
+
+
+class WriteToFileToolResolver(BaseToolResolver):
+    def __init__(self, agent: Optional[Any], tool: WriteToFileTool, args: Dict[str, Any]):
+        super().__init__(agent, tool, args)
+        self.tool: WriteToFileTool = tool # For type hinting
+
+    def parse_diff(self, diff_content: str) -> List[Tuple[str, str]]:
+        """
+        Parses the diff content into a list of (search_block, replace_block) tuples.
+        """
+        blocks = []
+        # Regex to find SEARCH/REPLACE blocks, handling potential variations in line endings
+        pattern = re.compile(r"<<<<<<< SEARCH\r?\n(.*?)\r?\n=======\r?\n(.*?)\r?\n>>>>>>> REPLACE", re.DOTALL)
+        matches = pattern.findall(diff_content)
+        for search_block, replace_block in matches:
+            blocks.append((search_block, replace_block))
+        if not matches and diff_content.strip():
+            logger.warning(f"Could not parse any SEARCH/REPLACE blocks from diff: {diff_content}")
+        return blocks
+
+    def resolve(self) -> ToolResult:
+        file_path = self.tool.path
+        content = self.tool.content
+        source_dir = self.args.source_dir or "."
+        absolute_path = os.path.abspath(os.path.join(source_dir, file_path))
+
+        # Security check: ensure the path is within the source directory
+        if not absolute_path.startswith(os.path.abspath(source_dir)):
+            return ToolResult(success=False, message=f"Error: Access denied. Attempted to write file outside the project directory: {file_path}")
+
+        try:
+            # Create directories if they don't exist
+            os.makedirs(os.path.dirname(absolute_path), exist_ok=True)
+
+            # Check if the content contains SEARCH/REPLACE blocks
+            parsed_blocks = self.parse_diff(content)
+            if parsed_blocks:
+                # If file exists, read its current content
+                if os.path.exists(absolute_path):
+                    try:
+                        with open(absolute_path, 'r', encoding='utf-8', errors='replace') as f:
+                            original_content = f.read()
+                    except Exception as e:
+                        logger.error(f"Error reading existing file '{file_path}' for diff apply: {str(e)}")
+                        return ToolResult(success=False, message=f"An error occurred while reading the existing file: {str(e)}")
+                else:
+                    # If file does not exist, start with empty content
+                    original_content = ""
+
+                current_content = original_content
+                applied_count = 0
+                errors = []
+
+                for i, (search_block, replace_block) in enumerate(parsed_blocks):
+                    start_index = current_content.find(search_block)
+                    if start_index != -1:
+                        current_content = (
+                            current_content[:start_index]
+                            + replace_block
+                            + current_content[start_index + len(search_block):]
+                        )
+                        applied_count += 1
+                        logger.info(f"Applied SEARCH/REPLACE block {i+1} in file {file_path}")
+                    else:
+                        error_message = f"SEARCH block {i+1} not found in current content. Search block:\n---\n{search_block}\n---"
+                        logger.warning(error_message)
+                        errors.append(error_message)
+                        # Continue with next block
+
+                try:
+                    with open(absolute_path, 'w', encoding='utf-8') as f:
+                        f.write(current_content)
+                    message = f"Successfully applied {applied_count}/{len(parsed_blocks)} changes to file: {file_path}."
+                    if errors:
+                        message += "\nWarnings:\n" + "\n".join(errors)
+                    logger.info(message)
+                    return ToolResult(success=True, message=message, content=current_content)
+                except Exception as e:
+                    logger.error(f"Error writing replaced content to file '{file_path}': {str(e)}")
+                    return ToolResult(success=False, message=f"An error occurred while writing the modified file: {str(e)}")
+            else:
+                # No diff blocks detected, treat as full content overwrite
+                with open(absolute_path, 'w', encoding='utf-8') as f:
+                    f.write(content)
+
+                logger.info(f"Successfully wrote to file: {file_path}")
+                return ToolResult(success=True, message=f"Successfully wrote to file: {file_path}", content=content)
+
+        except Exception as e:
+            logger.error(f"Error writing to file '{file_path}': {str(e)}")
+            return ToolResult(success=False, message=f"An error occurred while writing to the file: {str(e)}")
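Note: as the new resolver shows, write_to_file content may either be a full overwrite or embed SEARCH/REPLACE blocks. A minimal, self-contained sketch of the block format that parse_diff matches; the regex is copied from the code above, while the sample_diff text is made up for illustration:

    import re

    # Same pattern as WriteToFileToolResolver.parse_diff (see diff above).
    PATTERN = re.compile(
        r"<<<<<<< SEARCH\r?\n(.*?)\r?\n=======\r?\n(.*?)\r?\n>>>>>>> REPLACE",
        re.DOTALL,
    )

    # Hypothetical tool input: one SEARCH/REPLACE block rewriting a greeting.
    sample_diff = (
        "<<<<<<< SEARCH\n"
        "print('hello')\n"
        "=======\n"
        "print('hello, world')\n"
        ">>>>>>> REPLACE"
    )

    blocks = PATTERN.findall(sample_diff)
    # -> [("print('hello')", "print('hello, world')")]
    for search_block, replace_block in blocks:
        print(repr(search_block), "->", repr(replace_block))

Each matched pair is then applied with a plain substring search on the current file content, exactly as resolve() does above.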
autocoder/agent/agentic_edit_types.py ADDED
@@ -0,0 +1,124 @@
+from pydantic import BaseModel
+from typing import List, Dict, Any, Callable, Optional, Type
+from pydantic import SkipValidation
+
+
+# Result class used by Tool Resolvers
+class ToolResult(BaseModel):
+    success: bool
+    message: str
+    content: Any = None # Can store file content, command output, etc.
+
+# Pydantic Models for Tools
+class BaseTool(BaseModel):
+    pass
+
+class ExecuteCommandTool(BaseTool):
+    command: str
+    requires_approval: bool
+
+class ReadFileTool(BaseTool):
+    path: str
+
+class WriteToFileTool(BaseTool):
+    path: str
+    content: str
+
+class ReplaceInFileTool(BaseTool):
+    path: str
+    diff: str
+
+class SearchFilesTool(BaseTool):
+    path: str
+    regex: str
+    file_pattern: Optional[str] = None
+
+class ListFilesTool(BaseTool):
+    path: str
+    recursive: Optional[bool] = False
+
+class ListCodeDefinitionNamesTool(BaseTool):
+    path: str
+
+class AskFollowupQuestionTool(BaseTool):
+    question: str
+    options: Optional[List[str]] = None
+
+class AttemptCompletionTool(BaseTool):
+    result: str
+    command: Optional[str] = None
+
+class PlanModeRespondTool(BaseTool):
+    response: str
+    options: Optional[List[str]] = None
+
+class UseMcpTool(BaseTool):
+    server_name: str
+    tool_name: str
+    arguments: Dict[str, Any]
+
+class PlainTextOutput(BaseModel):
+    text: str
+
+
+# Mapping from tool tag names to Pydantic models
+TOOL_MODEL_MAP: Dict[str, Type[BaseTool]] = {
+    "execute_command": ExecuteCommandTool,
+    "read_file": ReadFileTool,
+    "write_to_file": WriteToFileTool,
+    "replace_in_file": ReplaceInFileTool,
+    "search_files": SearchFilesTool,
+    "list_files": ListFilesTool,
+    "list_code_definition_names": ListCodeDefinitionNamesTool,
+    "ask_followup_question": AskFollowupQuestionTool,
+    "attempt_completion": AttemptCompletionTool,
+    "plan_mode_respond": PlanModeRespondTool,
+    "use_mcp_tool": UseMcpTool,
+}
+
+
+class AgenticEditRequest(BaseModel):
+    user_input: str
+
+
+class FileOperation(BaseModel):
+    path: str
+    operation: str # e.g., "MODIFY", "REFERENCE", "ADD", "REMOVE"
+class MemoryConfig(BaseModel):
+    """
+    A model to encapsulate memory configuration and operations.
+    """
+
+    memory: Dict[str, Any]
+    save_memory_func: SkipValidation[Callable]
+
+    class Config:
+        arbitrary_types_allowed = True
+
+
+class CommandConfig(BaseModel):
+    coding: SkipValidation[Callable]
+    chat: SkipValidation[Callable]
+    add_files: SkipValidation[Callable]
+    remove_files: SkipValidation[Callable]
+    index_build: SkipValidation[Callable]
+    index_query: SkipValidation[Callable]
+    list_files: SkipValidation[Callable]
+    ask: SkipValidation[Callable]
+    revert: SkipValidation[Callable]
+    commit: SkipValidation[Callable]
+    help: SkipValidation[Callable]
+    exclude_dirs: SkipValidation[Callable]
+    summon: SkipValidation[Callable]
+    design: SkipValidation[Callable]
+    mcp: SkipValidation[Callable]
+    models: SkipValidation[Callable]
+    lib: SkipValidation[Callable]
+    execute_shell_command: SkipValidation[Callable]
+    generate_shell_command: SkipValidation[Callable]
+    conf_export: SkipValidation[Callable]
+    conf_import: SkipValidation[Callable]
+    index_export: SkipValidation[Callable]
+    index_import: SkipValidation[Callable]
+    exclude_files: SkipValidation[Callable]
+
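Note: TOOL_MODEL_MAP above maps tool tag names to pydantic models. A hedged sketch of how a parsed tool tag might be turned into a typed tool object; only the map and the models come from the file above, while the parsed_tag dict and the build_tool helper are hypothetical (requires auto-coder 0.1.336 installed):

    from typing import Any, Dict
    from autocoder.agent.agentic_edit_types import TOOL_MODEL_MAP, BaseTool

    # Hypothetical payload parsed from a <write_to_file>...</write_to_file> tag.
    parsed_tag = {
        "tag": "write_to_file",
        "fields": {"path": "src/hello.py", "content": "print('hi')\n"},
    }

    def build_tool(tag: str, fields: Dict[str, Any]) -> BaseTool:
        """Look up the pydantic model for a tool tag and validate its fields."""
        model_cls = TOOL_MODEL_MAP[tag]   # e.g. WriteToFileTool
        return model_cls(**fields)        # pydantic validates the field types here

    tool = build_tool(parsed_tag["tag"], parsed_tag["fields"])
    print(type(tool).__name__, tool.path)  # WriteToFileTool src/hello.py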
autocoder/auto_coder.py CHANGED
@@ -52,6 +52,11 @@ from autocoder.privacy.model_filter import ModelPathFilter
 from autocoder.common.result_manager import ResultManager
 from autocoder.events.event_manager_singleton import get_event_manager
 from autocoder.events import event_content as EventContentCreator
+from autocoder.common.mcp_server import get_mcp_server
+from autocoder.common.mcp_server_types import (
+    McpRequest, McpInstallRequest, McpRemoveRequest, McpListRequest,
+    McpListRunningRequest, McpRefreshRequest
+)

 console = Console()

@@ -261,7 +266,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )

@@ -284,7 +290,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 models.append(code_model)
@@ -302,7 +309,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("code_model", code_model)
@@ -323,7 +331,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 models.append(rerank_model)
@@ -341,7 +350,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("generate_rerank_model", rerank_model)
@@ -358,7 +368,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("inference_model", inference_model)
@@ -375,7 +386,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("index_filter_model", index_filter_model)
@@ -541,7 +553,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("chat_model", chat_model)
@@ -558,7 +571,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("vl_model", vl_model)
@@ -575,7 +589,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("index_model", index_model)
@@ -592,7 +607,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("sd_model", sd_model)
@@ -609,7 +625,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("text2voice_model", text2voice_model)
@@ -626,7 +643,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("voice2text_model", voice2text_model)
@@ -643,7 +661,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("planner_model", planner_model)
@@ -660,7 +679,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("commit_model", commit_model)
@@ -677,7 +697,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("designer_model", designer_model)
@@ -694,7 +715,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("emb_model", emb_model)
@@ -1254,7 +1276,6 @@ def main(input_args: Optional[List[str]] = None):
 v = (item for item in response)

 elif "mcp" in commands_info:
-    from autocoder.common.mcp_server import get_mcp_server, McpRequest, McpInstallRequest, McpRemoveRequest, McpListRequest, McpListRunningRequest, McpRefreshRequest
     mcp_server = get_mcp_server()

     pos_args = commands_info["mcp"].get("args", [])
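Note: every saas config block in the hunks above gains the same line, so saas.max_output_tokens now falls back to 8096 whenever the model entry does not define it. A tiny sketch of that fallback (the model_info dict here is made up):

    model_info = {"base_url": "https://api.example.com", "api_key": "...",
                  "model_name": "m1", "is_reasoning": False}

    # Entry absent -> default of 8096, mirroring model_info.get("max_output_tokens", 8096)
    print(model_info.get("max_output_tokens", 8096))   # 8096

    model_info["max_output_tokens"] = 16384
    print(model_info.get("max_output_tokens", 8096))   # 16384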
autocoder/auto_coder_rag.py CHANGED
@@ -636,7 +636,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )

@@ -652,7 +653,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("recall_model", recall_model)
@@ -668,7 +670,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("chunk_model", chunk_model)
@@ -684,7 +687,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("qa_model", qa_model)
@@ -700,7 +704,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": False
+"saas.is_reasoning": False,
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("emb_model", emb_model)
@@ -792,7 +797,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )

@@ -806,7 +812,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": False
+"saas.is_reasoning": False,
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )
 llm.setup_sub_client("emb_model", emb_model)
@@ -852,7 +859,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )

@@ -882,7 +890,8 @@ def main(input_args: Optional[List[str]] = None):
 "saas.base_url": model_info["base_url"],
 "saas.api_key": model_info["api_key"],
 "saas.model": model_info["model_name"],
-"saas.is_reasoning": model_info["is_reasoning"]
+"saas.is_reasoning": model_info["is_reasoning"],
+"saas.max_output_tokens": model_info.get("max_output_tokens", 8096)
 }
 )

autocoder/auto_coder_runner.py CHANGED
@@ -40,7 +40,11 @@ import git
 from autocoder.common import git_utils
 from autocoder.chat_auto_coder_lang import get_message
 from autocoder.agent.auto_guess_query import AutoGuessQuery
-from autocoder.common.mcp_server import get_mcp_server, McpRequest, McpInstallRequest, McpRemoveRequest, McpListRequest, McpListRunningRequest, McpRefreshRequest,McpServerInfoRequest
+from autocoder.common.mcp_server import get_mcp_server
+from autocoder.common.mcp_server_types import (
+    McpRequest, McpInstallRequest, McpRemoveRequest, McpListRequest,
+    McpListRunningRequest, McpRefreshRequest, McpServerInfoRequest
+)
 import byzerllm
 from byzerllm.utils import format_str_jinja2
 from autocoder.common.memory_manager import get_global_memory_file_paths
@@ -54,11 +58,12 @@ from autocoder.memory.active_context_manager import ActiveContextManager
 from autocoder.common.command_completer import CommandCompleter,FileSystemModel as CCFileSystemModel,MemoryConfig as CCMemoryModel
 from autocoder.common.conf_validator import ConfigValidator
 from autocoder import command_parser as CommandParser
-from loguru import logger
+from loguru import logger as global_logger
 from autocoder.utils.project_structure import EnhancedFileAnalyzer
+from autocoder.common import SourceCodeList

-## 对外API,用于第三方集成 auto-coder 使用。

+## 对外API,用于第三方集成 auto-coder 使用。
 class SymbolItem(BaseModel):
     symbol_name: str
     symbol_type: SymbolType
@@ -218,6 +223,36 @@ def get_all_extensions(directory: str = ".") -> str:
     # 转换为逗号分隔的字符串
     return ",".join(sorted(all_extensions))

+def configure_logger():
+    # 设置日志目录和文件
+    log_dir = os.path.join(project_root, ".auto-coder", "logs")
+    os.makedirs(log_dir, exist_ok=True)
+    log_file = os.path.join(log_dir, "auto-coder.log")
+
+    # 配置全局日志
+    # 默认情况下,所有日志都写入文件
+    # 控制台上默认不输出任何日志,除非显式配置
+    global_logger.configure(
+        handlers=[
+            {
+                "sink": log_file,
+                "level": "INFO",
+                "rotation": "10 MB",
+                "retention": "1 week",
+                "format": "{time:YYYY-MM-DD HH:mm:ss} | {level} | {name} | {message}",
+            },
+            {
+                "sink": sys.stdout,
+                "level": "INFO",
+                "format": "{time:YYYY-MM-DD HH:mm:ss} | {name} | {message}",
+                # 默认不打印任何日志到控制台
+                "filter": lambda record: False
+            }
+        ]
+    )
+
+configure_logger()
+
 def initialize_system(args:InitializeSystemRequest):
     from autocoder.utils.model_provider_selector import ModelProviderSelector
     from autocoder import models as models_module
@@ -2743,11 +2778,21 @@ def conf_import(path: str):
     import_conf(os.getcwd(), path)

 @run_in_raw_thread()
-def auto_command(query: str,extra_args: Dict[str,Any]={}):
-    """处理/auto指令"""
+def auto_command(query: str,extra_args: Dict[str,Any]={}):
+    """处理/auto指令"""
     from autocoder.commands.auto_command import CommandAutoTuner, AutoCommandRequest, CommandConfig, MemoryConfig
     args = get_final_config()

+    if args.enable_agentic_edit:
+        from autocoder.common.v2.agent.agentic_edit import AgenticEdit,AgenticEditRequest
+        llm = get_single_llm(args.code_model or args.model,product_mode=args.product_mode)
+        agent = AgenticEdit(llm=llm,args=args,files=SourceCodeList(sources=[]),
+            conversation_history=[],
+            memory_config=MemoryConfig(memory=memory,
+                save_memory_func=save_memory), command_config=CommandConfig)
+        agent.run_in_terminal(AgenticEditRequest(user_input=query))
+        return
+
     # 准备请求参数
     request = AutoCommandRequest(
         user_input=query
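Note: the new configure_logger() routes everything to a rotating file sink and registers a stdout sink whose filter always returns False, so nothing reaches the console by default. A standalone reproduction of that handler scheme with plain loguru, for illustration only (the demo log path is made up):

    import sys
    from loguru import logger

    # Same scheme as configure_logger() above: file sink records INFO and up,
    # stdout sink is registered but filtered out entirely.
    logger.remove()
    logger.add("/tmp/auto-coder-demo.log", level="INFO",
               format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {name} | {message}")
    logger.add(sys.stdout, level="INFO",
               format="{time:YYYY-MM-DD HH:mm:ss} | {name} | {message}",
               filter=lambda record: False)   # nothing is printed to the console

    logger.info("written to the file sink only")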
autocoder/chat_auto_coder_lang.py CHANGED
@@ -30,6 +30,14 @@ MESSAGES = {
         "en": "Available builtin MCP servers:",
         "zh": "可用的内置 MCP 服务器:"
     },
+    "mcp_list_external_title": {
+        "en": "Available external MCP servers:",
+        "zh": "可用的外部 MCP 服务器:"
+    },
+    "mcp_list_marketplace_title": {
+        "en": "Available marketplace MCP servers:",
+        "zh": "可用的市场 MCP 服务器:"
+    },
     "mcp_refresh_error": {
         "en": "Error refreshing MCP servers: {{error}}",
         "zh": "刷新 MCP 服务器时出错:{{error}}"
@@ -543,8 +551,8 @@ MESSAGES = {
         "zh": "用法: /plugins <命令>\n可用的子命令:\n /plugins /list - 列出所有可用插件\n /plugins /load <名称> - 加载一个插件\n /plugins /unload <名称> - 卸载一个插件\n /plugins/dirs - 列出插件目录\n /plugins/dirs /add <路径> - 添加一个插件目录\n /plugins/dirs /remove <路径> - 移除一个插件目录\n /plugins/dirs /clear - 清除所有插件目录"
     },
     "mcp_server_info_error": {
-        "en": "Error getting MCP server info: {{ error }}",
-        "zh": "获取MCP服务器信息时出错: {{ error }}"
+        "en": "Error getting MCP server info: {{error}}",
+        "zh": "获取 MCP 服务器信息时出错:{{error}}"
     },
     "mcp_server_info_title": {
         "en": "Connected MCP Server Info",
@@ -553,6 +561,14 @@ MESSAGES = {
     "active_context_desc": {
         "en": "Manage active context tasks, list all tasks and their status",
         "zh": "管理活动上下文任务,列出所有任务及其状态"
+    },
+    "marketplace_add_success": {
+        "en": "Successfully added marketplace item: {{name}}",
+        "zh": "成功添加市场项目:{{name}}"
+    },
+    "marketplace_add_error": {
+        "en": "Error adding marketplace item: {{name}} - {{error}}",
+        "zh": "添加市场项目时出错:{{name}} - {{error}}"
     }
 }

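Note: the new entries follow the existing placeholder convention in this file: {{name}} and {{error}} are jinja2-style slots filled at display time. A minimal rendering sketch using plain jinja2, since the project's own helpers (get_message, format_str_jinja2 imported in auto_coder_runner.py above) are not shown in this diff; the name and error values are made up:

    from jinja2 import Template

    # One of the entries added above.
    marketplace_add_error = {
        "en": "Error adding marketplace item: {{name}} - {{error}}",
        "zh": "添加市场项目时出错:{{name}} - {{error}}",
    }

    rendered = Template(marketplace_add_error["en"]).render(name="github-mcp", error="timeout")
    print(rendered)  # Error adding marketplace item: github-mcp - timeout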
autocoder/commands/tools.py CHANGED
@@ -42,6 +42,11 @@ from autocoder.events.event_manager_singleton import get_event_manager
 from autocoder.events import event_content as EventContentCreator
 from autocoder.linters.linter_factory import LinterFactory, lint_file, lint_project, format_lint_result
 import traceback
+from autocoder.common.mcp_server import get_mcp_server
+from autocoder.common.mcp_server_types import (
+    McpRequest, McpInstallRequest, McpRemoveRequest, McpListRequest,
+    McpListRunningRequest, McpRefreshRequest
+)


 @byzerllm.prompt()
@@ -76,7 +81,6 @@ class AutoCommandTools:
         self.printer = Printer()

     def execute_mcp_server(self, query: str) -> str:
-        from autocoder.common.mcp_server import get_mcp_server, McpRequest, McpInstallRequest, McpRemoveRequest, McpListRequest, McpListRunningRequest, McpRefreshRequest
         mcp_server = get_mcp_server()
         response = mcp_server.send_request(
             McpRequest(
autocoder/common/__init__.py CHANGED
@@ -272,6 +272,7 @@ class AutoCoderArgs(pydantic.BaseModel):
     index_model_anti_quota_limit: Optional[int] = 0

     enable_agentic_filter: Optional[bool] = False
+    enable_agentic_edit: Optional[bool] = False


     index_filter_level: Optional[int] = 0
@@ -416,6 +417,7 @@ class AutoCoderArgs(pydantic.BaseModel):
     event_file: Optional[str] = None

     enable_active_context: Optional[bool] = False
+    enable_active_context_in_generate: Optional[bool] = False

     generate_max_rounds: Optional[int] = 5