auto-coder 0.1.363-py3-none-any.whl → 0.1.365-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of auto-coder may be problematic.
- {auto_coder-0.1.363.dist-info → auto_coder-0.1.365.dist-info}/METADATA +2 -2
- {auto_coder-0.1.363.dist-info → auto_coder-0.1.365.dist-info}/RECORD +39 -23
- autocoder/agent/base_agentic/tools/execute_command_tool_resolver.py +1 -1
- autocoder/auto_coder.py +46 -2
- autocoder/auto_coder_runner.py +2 -0
- autocoder/common/__init__.py +5 -0
- autocoder/common/file_checkpoint/__init__.py +21 -0
- autocoder/common/file_checkpoint/backup.py +264 -0
- autocoder/common/file_checkpoint/conversation_checkpoint.py +182 -0
- autocoder/common/file_checkpoint/examples.py +217 -0
- autocoder/common/file_checkpoint/manager.py +611 -0
- autocoder/common/file_checkpoint/models.py +156 -0
- autocoder/common/file_checkpoint/store.py +383 -0
- autocoder/common/file_checkpoint/test_backup.py +242 -0
- autocoder/common/file_checkpoint/test_manager.py +570 -0
- autocoder/common/file_checkpoint/test_models.py +360 -0
- autocoder/common/file_checkpoint/test_store.py +327 -0
- autocoder/common/file_checkpoint/test_utils.py +297 -0
- autocoder/common/file_checkpoint/utils.py +119 -0
- autocoder/common/rulefiles/autocoderrules_utils.py +114 -55
- autocoder/common/save_formatted_log.py +76 -5
- autocoder/common/utils_code_auto_generate.py +2 -1
- autocoder/common/v2/agent/agentic_edit.py +545 -225
- autocoder/common/v2/agent/agentic_edit_tools/list_files_tool_resolver.py +83 -43
- autocoder/common/v2/agent/agentic_edit_tools/read_file_tool_resolver.py +116 -29
- autocoder/common/v2/agent/agentic_edit_tools/replace_in_file_tool_resolver.py +179 -48
- autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py +101 -56
- autocoder/common/v2/agent/agentic_edit_tools/test_write_to_file_tool_resolver.py +322 -0
- autocoder/common/v2/agent/agentic_edit_tools/write_to_file_tool_resolver.py +173 -132
- autocoder/common/v2/agent/agentic_edit_types.py +4 -0
- autocoder/compilers/normal_compiler.py +64 -0
- autocoder/events/event_manager_singleton.py +133 -4
- autocoder/linters/normal_linter.py +373 -0
- autocoder/linters/python_linter.py +4 -2
- autocoder/version.py +1 -1
- {auto_coder-0.1.363.dist-info → auto_coder-0.1.365.dist-info}/LICENSE +0 -0
- {auto_coder-0.1.363.dist-info → auto_coder-0.1.365.dist-info}/WHEEL +0 -0
- {auto_coder-0.1.363.dist-info → auto_coder-0.1.365.dist-info}/entry_points.txt +0 -0
- {auto_coder-0.1.363.dist-info → auto_coder-0.1.365.dist-info}/top_level.txt +0 -0
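The per-file diffs below were reconstructed from the registry's side-by-side viewer. If you want to verify them independently, one option is to download both wheels locally (for example with "pip download auto-coder==0.1.363 --no-deps" and "pip download auto-coder==0.1.365 --no-deps") and diff individual files yourself. The following is a minimal sketch of that, using only the standard library; the wheel filenames are assumptions about what pip will have written to the current directory.

    # Minimal sketch: diff one module between two locally downloaded wheels.
    # Wheels are plain zip archives, so zipfile + difflib are enough.
    import difflib
    import zipfile

    OLD_WHEEL = "auto_coder-0.1.363-py3-none-any.whl"  # assumed local filename
    NEW_WHEEL = "auto_coder-0.1.365-py3-none-any.whl"  # assumed local filename
    MEMBER = "autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py"

    def read_member(wheel_path, member):
        """Return the decoded lines of one file stored inside a wheel."""
        with zipfile.ZipFile(wheel_path) as zf:
            return zf.read(member).decode("utf-8", errors="replace").splitlines(keepends=True)

    if __name__ == "__main__":
        diff = difflib.unified_diff(
            read_member(OLD_WHEEL, MEMBER),
            read_member(NEW_WHEEL, MEMBER),
            fromfile=f"0.1.363/{MEMBER}",
            tofile=f"0.1.365/{MEMBER}",
        )
        print("".join(diff))
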
autocoder/common/v2/agent/agentic_edit_tools/search_files_tool_resolver.py

@@ -1,7 +1,7 @@
 import os
 import re
 import glob
-from typing import Dict, Any, Optional
+from typing import Dict, Any, Optional, List, Union
 from autocoder.common.v2.agent.agentic_edit_tools.base_tool_resolver import BaseToolResolver
 from autocoder.common.v2.agent.agentic_edit_types import SearchFilesTool, ToolResult # Import ToolResult from types
 from loguru import logger
@@ -20,14 +20,54 @@ class SearchFilesToolResolver(BaseToolResolver):
         self.tool: SearchFilesTool = tool
         self.shadow_manager = self.agent.shadow_manager if self.agent else None
 
[old lines 23-29 were removed here; their text was not preserved in this extract]
+    def search_in_dir(self, base_dir: str, regex_pattern: str, file_pattern: str, source_dir: str, is_shadow: bool = False, compiled_regex: Optional[re.Pattern] = None) -> List[Dict[str, Any]]:
+        """Helper function to search in a directory"""
+        search_results = []
+        search_glob_pattern = os.path.join(base_dir, "**", file_pattern)
+
+        logger.info(f"Searching for regex '{regex_pattern}' in files matching '{file_pattern}' under '{base_dir}' (shadow: {is_shadow}) with ignore rules applied.")
+
+        if compiled_regex is None:
+            compiled_regex = re.compile(regex_pattern)
+
+        for filepath in glob.glob(search_glob_pattern, recursive=True):
+            abs_path = os.path.abspath(filepath)
+            if should_ignore(abs_path):
+                continue
 
+            if os.path.isfile(filepath):
+                try:
+                    with open(filepath, 'r', encoding='utf-8', errors='replace') as f:
+                        lines = f.readlines()
+                    for i, line in enumerate(lines):
+                        if compiled_regex.search(line):
+                            context_start = max(0, i - 2)
+                            context_end = min(len(lines), i + 3)
+                            context = "".join([f"{j+1}: {lines[j]}" for j in range(context_start, context_end)])
+
+                            if is_shadow and self.shadow_manager:
+                                try:
+                                    abs_project_path = self.shadow_manager.from_shadow_path(filepath)
+                                    relative_path = os.path.relpath(abs_project_path, source_dir)
+                                except Exception:
+                                    relative_path = os.path.relpath(filepath, source_dir)
+                            else:
+                                relative_path = os.path.relpath(filepath, source_dir)
+
+                            search_results.append({
+                                "path": relative_path,
+                                "line_number": i + 1,
+                                "match_line": line.strip(),
+                                "context": context.strip()
+                            })
+                except Exception as e:
+                    logger.warning(f"Could not read or process file {filepath}: {e}")
+                    continue
+
+        return search_results
+
+    def search_files_with_shadow(self, search_path_str: str, regex_pattern: str, file_pattern: str, source_dir: str, absolute_source_dir: str, absolute_search_path: str) -> Union[ToolResult, List[Dict[str, Any]]]:
+        """Search files using shadow manager for path translation"""
         # Security check
         if not absolute_search_path.startswith(absolute_source_dir):
             return ToolResult(success=False, message=f"Error: Access denied. Attempted to search outside the project directory: {search_path_str}")
@@ -54,58 +94,15 @@ class SearchFilesToolResolver(BaseToolResolver):
         try:
             compiled_regex = re.compile(regex_pattern)
 
-            # Helper function to search in a directory
-            def search_in_dir(base_dir, is_shadow=False):
-                search_results = []
-                search_glob_pattern = os.path.join(base_dir, "**", file_pattern)
-
-                logger.info(f"Searching for regex '{regex_pattern}' in files matching '{file_pattern}' under '{base_dir}' (shadow: {is_shadow}) with ignore rules applied.")
-
-                for filepath in glob.glob(search_glob_pattern, recursive=True):
-                    abs_path = os.path.abspath(filepath)
-                    if should_ignore(abs_path):
-                        continue
-
-                    if os.path.isfile(filepath):
-                        try:
-                            with open(filepath, 'r', encoding='utf-8', errors='replace') as f:
-                                lines = f.readlines()
-                            for i, line in enumerate(lines):
-                                if compiled_regex.search(line):
-                                    context_start = max(0, i - 2)
-                                    context_end = min(len(lines), i + 3)
-                                    context = "".join([f"{j+1}: {lines[j]}" for j in range(context_start, context_end)])
-
-                                    if is_shadow and self.shadow_manager:
-                                        try:
-                                            abs_project_path = self.shadow_manager.from_shadow_path(filepath)
-                                            relative_path = os.path.relpath(abs_project_path, source_dir)
-                                        except Exception:
-                                            relative_path = os.path.relpath(filepath, source_dir)
-                                    else:
-                                        relative_path = os.path.relpath(filepath, source_dir)
-
-                                    search_results.append({
-                                        "path": relative_path,
-                                        "line_number": i + 1,
-                                        "match_line": line.strip(),
-                                        "context": context.strip()
-                                    })
-                        except Exception as e:
-                            logger.warning(f"Could not read or process file {filepath}: {e}")
-                            continue
-
-                return search_results
-
             # Search in both directories and merge results
             shadow_results = []
             source_results = []
 
             if shadow_exists:
-                shadow_results = search_in_dir(shadow_dir_path, is_shadow=True)
+                shadow_results = self.search_in_dir(shadow_dir_path, regex_pattern, file_pattern, source_dir, is_shadow=True, compiled_regex=compiled_regex)
 
             if os.path.exists(absolute_search_path) and os.path.isdir(absolute_search_path):
-                source_results = search_in_dir(absolute_search_path, is_shadow=False)
+                source_results = self.search_in_dir(absolute_search_path, regex_pattern, file_pattern, source_dir, is_shadow=False, compiled_regex=compiled_regex)
 
             # Merge results, prioritizing shadow results
             # Create a dictionary for quick lookup
@@ -122,9 +119,34 @@ class SearchFilesToolResolver(BaseToolResolver):
             # Convert back to list
             merged_results = list(results_dict.values())
 
[old lines 125-127 were removed here; their text was not preserved in this extract]
+            return merged_results
+
+        except re.error as e:
+            logger.error(f"Invalid regex pattern '{regex_pattern}': {e}")
+            return ToolResult(success=False, message=f"Invalid regex pattern: {e}")
+        except Exception as e:
+            logger.error(f"Error during file search: {str(e)}")
+            return ToolResult(success=False, message=f"An unexpected error occurred during search: {str(e)}")
+
+    def search_files_normal(self, search_path_str: str, regex_pattern: str, file_pattern: str, source_dir: str, absolute_source_dir: str, absolute_search_path: str) -> Union[ToolResult, List[Dict[str, Any]]]:
+        """Search files directly without using shadow manager"""
+        # Security check
+        if not absolute_search_path.startswith(absolute_source_dir):
+            return ToolResult(success=False, message=f"Error: Access denied. Attempted to search outside the project directory: {search_path_str}")
+
+        # Validate that the directory exists
+        if not os.path.exists(absolute_search_path):
+            return ToolResult(success=False, message=f"Error: Search path not found: {search_path_str}")
+        if not os.path.isdir(absolute_search_path):
+            return ToolResult(success=False, message=f"Error: Search path is not a directory: {search_path_str}")
+
+        try:
+            compiled_regex = re.compile(regex_pattern)
+
+            # Search in the directory
+            search_results = self.search_in_dir(absolute_search_path, regex_pattern, file_pattern, source_dir, is_shadow=False, compiled_regex=compiled_regex)
+
+            return search_results
 
         except re.error as e:
             logger.error(f"Invalid regex pattern '{regex_pattern}': {e}")
@@ -132,3 +154,26 @@ class SearchFilesToolResolver(BaseToolResolver):
         except Exception as e:
             logger.error(f"Error during file search: {str(e)}")
             return ToolResult(success=False, message=f"An unexpected error occurred during search: {str(e)}")
+
+    def resolve(self) -> ToolResult:
+        """Resolve the search files tool by calling the appropriate implementation"""
+        search_path_str = self.tool.path
+        regex_pattern = self.tool.regex
+        file_pattern = self.tool.file_pattern or "*"
+        source_dir = self.args.source_dir or "."
+        absolute_source_dir = os.path.abspath(source_dir)
+        absolute_search_path = os.path.abspath(os.path.join(source_dir, search_path_str))
+
+        # Choose the appropriate implementation based on whether shadow_manager is available
+        if self.shadow_manager:
+            result = self.search_files_with_shadow(search_path_str, regex_pattern, file_pattern, source_dir, absolute_source_dir, absolute_search_path)
+        else:
+            result = self.search_files_normal(search_path_str, regex_pattern, file_pattern, source_dir, absolute_source_dir, absolute_search_path)
+
+        # Handle the case where the implementation returns a list instead of a ToolResult
+        if isinstance(result, list):
+            message = f"Search completed. Found {len(result)} matches."
+            logger.info(message)
+            return ToolResult(success=True, message=message, content=result)
+        else:
+            return result
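The core of the refactor above is that the old nested search_in_dir closure becomes a method shared by both the shadow and the normal code paths. As a rough standalone sketch of the regex-search-with-context behaviour it implements (assuming no shadow manager and no ignore rules, and using an illustrative function name rather than the package's API), the idea looks like this:

    import glob
    import os
    import re
    from typing import Any, Dict, List

    def search_with_context(base_dir: str, regex_pattern: str, file_pattern: str = "*") -> List[Dict[str, Any]]:
        """Recursively search files under base_dir, returning matches with two lines of context."""
        compiled = re.compile(regex_pattern)
        results: List[Dict[str, Any]] = []
        for filepath in glob.glob(os.path.join(base_dir, "**", file_pattern), recursive=True):
            if not os.path.isfile(filepath):
                continue
            try:
                with open(filepath, "r", encoding="utf-8", errors="replace") as f:
                    lines = f.readlines()
            except OSError:
                continue
            for i, line in enumerate(lines):
                if compiled.search(line):
                    start, end = max(0, i - 2), min(len(lines), i + 3)
                    context = "".join(f"{j + 1}: {lines[j]}" for j in range(start, end))
                    results.append({
                        "path": os.path.relpath(filepath, base_dir),
                        "line_number": i + 1,
                        "match_line": line.strip(),
                        "context": context.strip(),
                    })
        return results

    # Example: search_with_context(".", r"def\s+resolve", "*.py")
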
autocoder/common/v2/agent/agentic_edit_tools/test_write_to_file_tool_resolver.py

@@ -0,0 +1,322 @@
+
+import pytest
+import os
+import shutil
+import json
+from unittest.mock import MagicMock, patch
+
+from autocoder.common import AutoCoderArgs
+from autocoder.common.v2.agent.agentic_edit_types import WriteToFileTool, ToolResult
+from autocoder.common.v2.agent.agentic_edit_tools.write_to_file_tool_resolver import WriteToFileToolResolver
+from autocoder.auto_coder_runner import load_tokenizer as load_tokenizer_global
+from autocoder.utils.llms import get_single_llm
+from autocoder.common.file_monitor.monitor import get_file_monitor, FileMonitor
+from autocoder.common.rulefiles.autocoderrules_utils import get_rules, reset_rules_manager
+from loguru import logger
+
+# Helper to create a temporary test directory
+@pytest.fixture(scope="function")
+def temp_test_dir(tmp_path_factory):
+    temp_dir = tmp_path_factory.mktemp("test_write_to_file_resolver_")
+    logger.info(f"Created temp dir for test: {temp_dir}")
+    # Create a dummy .autocoderignore to avoid issues with default ignore patterns loading
+    # from unexpected places if the test is run from a different CWD.
+    with open(os.path.join(temp_dir, ".autocoderignore"), "w") as f:
+        f.write("# Dummy ignore file for tests\n")
+    yield temp_dir
+    logger.info(f"Cleaning up temp dir: {temp_dir}")
+    # shutil.rmtree(temp_dir) # tmp_path_factory handles cleanup
+
+@pytest.fixture(scope="function")
+def setup_file_monitor_and_rules(temp_test_dir):
+    """Initializes FileMonitor and RulesManager for the test session."""
+    # Resetting instances to ensure test isolation
+    FileMonitor.reset_instance()
+    reset_rules_manager()
+
+    monitor = get_file_monitor(str(temp_test_dir))
+    if not monitor.is_running():
+        monitor.start()
+    logger.info(f"File monitor initialized with root: {monitor.root_dir}")
+
+    rules = get_rules(str(temp_test_dir))
+    logger.info(f"Rules loaded for dir: {temp_test_dir}, count: {len(rules)}")
+    return str(temp_test_dir)
+
+
+@pytest.fixture(scope="function")
+def load_tokenizer_fixture(setup_file_monitor_and_rules):
+    """Loads the tokenizer."""
+    try:
+        load_tokenizer_global()
+        logger.info("Tokenizer loaded successfully.")
+    except Exception as e:
+        logger.error(f"Failed to load tokenizer: {e}")
+        # Depending on test requirements, you might want to raise an error or skip tests
+        pytest.skip(f"Skipping tests due to tokenizer loading failure: {e}")
+
+
+@pytest.fixture(scope="function")
+def test_args(temp_test_dir, setup_file_monitor_and_rules, load_tokenizer_fixture):
+    """Provides default AutoCoderArgs for tests."""
+    args = AutoCoderArgs(
+        source_dir=str(temp_test_dir),
+        enable_auto_fix_lint=False, # Default to no linting for basic tests
+        # Potentially mock other args if needed by resolver or its dependencies
+    )
+    return args
+
+@pytest.fixture
+def mock_agent_no_shadow(test_args):
+    """Mocks an AgenticEdit instance that does not provide shadow capabilities."""
+    agent = MagicMock()
+    agent.shadow_manager = None
+    agent.shadow_linter = None
+    agent.args = test_args
+    agent.record_file_change = MagicMock()
+    return agent
+
+@pytest.fixture
+def mock_agent_with_shadow(test_args, temp_test_dir):
+    """Mocks an AgenticEdit instance with shadow capabilities."""
+    from autocoder.shadows.shadow_manager import ShadowManager
+    from autocoder.linters.shadow_linter import ShadowLinter
+
+    # Ensure the shadow base directory exists within the temp_test_dir for isolation
+    shadow_base_dir = os.path.join(temp_test_dir, ".auto-coder", "shadows")
+    os.makedirs(shadow_base_dir, exist_ok=True)
+
+    # Patch ShadowManager's default shadow_base to use our temp one
+    with patch('autocoder.shadows.shadow_manager.ShadowManager.DEFAULT_SHADOW_BASE_DIR', new=shadow_base_dir):
+        shadow_manager = ShadowManager(source_dir=str(temp_test_dir), event_file_id="test_event")
+
+    shadow_linter = ShadowLinter(shadow_manager=shadow_manager, verbose=False)
+
+    agent = MagicMock()
+    agent.shadow_manager = shadow_manager
+    agent.shadow_linter = shadow_linter
+    agent.args = test_args
+    agent.record_file_change = MagicMock()
+    return agent
+
+
+def test_create_new_file(test_args, temp_test_dir, mock_agent_no_shadow):
+    logger.info(f"Running test_create_new_file in {temp_test_dir}")
+    file_path = "new_file.txt"
+    content = "This is a new file."
+    tool = WriteToFileTool(path=file_path, content=content)
+
+    resolver = WriteToFileToolResolver(agent=mock_agent_no_shadow, tool=tool, args=test_args)
+    result = resolver.resolve()
+
+    assert result.success is True
+    assert "成功写入文件" in result.message or "Successfully wrote file" in result.message
+
+    expected_file_abs_path = os.path.join(temp_test_dir, file_path)
+    assert os.path.exists(expected_file_abs_path)
+    with open(expected_file_abs_path, "r", encoding="utf-8") as f:
+        assert f.read() == content
+    mock_agent_no_shadow.record_file_change.assert_called_once_with(file_path, "added", content=content, diffs=None)
+
+
+def test_overwrite_existing_file(test_args, temp_test_dir, mock_agent_no_shadow):
+    logger.info(f"Running test_overwrite_existing_file in {temp_test_dir}")
+    file_path = "existing_file.txt"
+    initial_content = "Initial content."
+    new_content = "This is the new content."
+
+    abs_file_path = os.path.join(temp_test_dir, file_path)
+    with open(abs_file_path, "w", encoding="utf-8") as f:
+        f.write(initial_content)
+
+    tool = WriteToFileTool(path=file_path, content=new_content)
+    resolver = WriteToFileToolResolver(agent=mock_agent_no_shadow, tool=tool, args=test_args)
+    result = resolver.resolve()
+
+    assert result.success is True
+    assert os.path.exists(abs_file_path)
+    with open(abs_file_path, "r", encoding="utf-8") as f:
+        assert f.read() == new_content
+    mock_agent_no_shadow.record_file_change.assert_called_once_with(file_path, "modified", content=new_content, diffs=None)
+
+
+def test_create_file_in_new_directory(test_args, temp_test_dir, mock_agent_no_shadow):
+    logger.info(f"Running test_create_file_in_new_directory in {temp_test_dir}")
+    file_path = "new_dir/another_new_dir/file.txt"
+    content = "Content in a nested directory."
+
+    tool = WriteToFileTool(path=file_path, content=content)
+    resolver = WriteToFileToolResolver(agent=mock_agent_no_shadow, tool=tool, args=test_args)
+    result = resolver.resolve()
+
+    assert result.success is True
+    expected_file_abs_path = os.path.join(temp_test_dir, file_path)
+    assert os.path.exists(expected_file_abs_path)
+    with open(expected_file_abs_path, "r", encoding="utf-8") as f:
+        assert f.read() == content
+    mock_agent_no_shadow.record_file_change.assert_called_once_with(file_path, "added", content=content, diffs=None)
+
+def test_path_outside_project_root_fails(test_args, temp_test_dir, mock_agent_no_shadow):
+    logger.info(f"Running test_path_outside_project_root_fails in {temp_test_dir}")
+    # Construct a path that tries to go outside the source_dir
+    # Note: The resolver's check is os.path.abspath(target_path).startswith(os.path.abspath(source_dir))
+    # So, a direct "../" might be normalized. We need a path that, when absolutized,
+    # is still outside an absolutized source_dir. This is tricky if source_dir is already root-like.
+    # For this test, we'll assume source_dir is not the filesystem root.
+
+    # A more robust way is to try to write to a known safe, but distinct, temporary directory
+    another_temp_dir = temp_test_dir.parent / "another_temp_dir_for_outside_test"
+    another_temp_dir.mkdir(exist_ok=True)
+
+    # This relative path, if source_dir is temp_test_dir, would resolve outside.
+    # However, the resolver joins it with source_dir first.
+    # file_path = "../outside_file.txt" # This will be joined with source_dir
+
+    # Let's try an absolute path that is outside temp_test_dir
+    outside_abs_path = os.path.join(another_temp_dir, "outside_file.txt")
+
+    # The tool path is relative to source_dir. So, to make it point outside,
+    # we need to construct a relative path that goes "up" from source_dir.
+    # This requires knowing the relative position of temp_test_dir.
+    # A simpler test for the security check:
+    # Give an absolute path to the tool that is outside test_args.source_dir.
+    # The resolver logic is:
+    # abs_file_path = os.path.abspath(os.path.join(source_dir, file_path_from_tool))
+    # So, if file_path_from_tool is already absolute, os.path.join might behave unexpectedly on Windows.
+    # On POSIX, if file_path_from_tool is absolute, os.path.join returns file_path_from_tool.
+
+    if os.name == 'posix':
+        file_path_for_tool = outside_abs_path
+    else: # Windows, os.path.join with absolute second path is tricky
+        # For windows, this test might need adjustment or rely on the fact that
+        # the file_path parameter to the tool is *expected* to be relative.
+        # Providing an absolute path might be an invalid use case for the tool itself.
+        # The resolver's security check should still catch it if os.path.join(source_dir, abs_path)
+        # results in abs_path and abs_path is outside source_dir.
+        file_path_for_tool = outside_abs_path
+
+    content = "Attempting to write outside."
+    tool = WriteToFileTool(path=str(file_path_for_tool), content=content)
+
+    resolver = WriteToFileToolResolver(agent=mock_agent_no_shadow, tool=tool, args=test_args)
+    result = resolver.resolve()
+
+    assert result.success is False
+    assert "访问被拒绝" in result.message or "Access denied" in result.message
+    assert not os.path.exists(outside_abs_path)
+
+    # shutil.rmtree(another_temp_dir) # Clean up the other temp dir if created by this test
+
+def test_linting_not_called_if_disabled(test_args, temp_test_dir, mock_agent_no_shadow):
+    logger.info(f"Running test_linting_not_called_if_disabled in {temp_test_dir}")
+    test_args.enable_auto_fix_lint = False # Explicitly disable
+
+    file_path = "no_lint_file.py"
+    content = "print('hello')"
+    tool = WriteToFileTool(path=file_path, content=content)
+
+    # Mock the linter parts if they were to be called
+    if mock_agent_no_shadow and hasattr(mock_agent_no_shadow, 'shadow_linter') and mock_agent_no_shadow.shadow_linter:
+        mock_agent_no_shadow.shadow_linter.lint_shadow_file = MagicMock(return_value=None) # Should not be called
+
+    resolver = WriteToFileToolResolver(agent=mock_agent_no_shadow, tool=tool, args=test_args)
+    result = resolver.resolve()
+
+    assert result.success is True
+    if mock_agent_no_shadow and hasattr(mock_agent_no_shadow, 'shadow_linter') and mock_agent_no_shadow.shadow_linter:
+        mock_agent_no_shadow.shadow_linter.lint_shadow_file.assert_not_called()
+
+    # Check if "代码质量检查已禁用" or "Linting is disabled" is in message
+    assert "代码质量检查已禁用" in result.message or "Linting is disabled" in result.message or "成功写入文件" in result.message
+
+
+def test_linting_called_if_enabled(test_args, temp_test_dir, mock_agent_with_shadow):
+    logger.info(f"Running test_linting_called_if_enabled in {temp_test_dir}")
+    test_args.enable_auto_fix_lint = True # Explicitly enable
+
+    file_path = "lint_file.py"
+    content = "print('hello world')" # Valid python
+    tool = WriteToFileTool(path=file_path, content=content)
+
+    # Mock the lint_shadow_file method on the shadow_linter provided by mock_agent_with_shadow
+    mock_lint_result = MagicMock()
+    mock_lint_result.issues = [] # No issues
+    mock_agent_with_shadow.shadow_linter.lint_shadow_file = MagicMock(return_value=mock_lint_result)
+
+    resolver = WriteToFileToolResolver(agent=mock_agent_with_shadow, tool=tool, args=test_args)
+    result = resolver.resolve()
+
+    assert result.success is True
+    mock_agent_with_shadow.shadow_linter.lint_shadow_file.assert_called_once()
+    # The actual path passed to lint_shadow_file will be the shadow path
+    shadow_path = mock_agent_with_shadow.shadow_manager.to_shadow_path(os.path.join(temp_test_dir, file_path))
+    mock_agent_with_shadow.shadow_linter.lint_shadow_file.assert_called_with(shadow_path)
+    assert "代码质量检查通过" in result.message or "Linting passed" in result.message
+
+
+def test_create_file_with_shadow_manager(test_args, temp_test_dir, mock_agent_with_shadow):
+    logger.info(f"Running test_create_file_with_shadow_manager in {temp_test_dir}")
+    file_path = "shadowed_file.txt"
+    content = "This file should be in the shadow realm."
+    tool = WriteToFileTool(path=file_path, content=content)
+
+    resolver = WriteToFileToolResolver(agent=mock_agent_with_shadow, tool=tool, args=test_args)
+    result = resolver.resolve()
+
+    assert result.success is True
+
+    real_file_abs_path = os.path.join(temp_test_dir, file_path)
+    shadow_file_abs_path = mock_agent_with_shadow.shadow_manager.to_shadow_path(real_file_abs_path)
+
+    assert not os.path.exists(real_file_abs_path) # Real file should not be created directly
+    assert os.path.exists(shadow_file_abs_path) # Shadow file should exist
+    with open(shadow_file_abs_path, "r", encoding="utf-8") as f:
+        assert f.read() == content
+
+    # Agent's record_file_change should still be called with the original relative path
+    mock_agent_with_shadow.record_file_change.assert_called_once_with(file_path, "added", content=content, diffs=None)
+
+    # Clean up shadows for this test if needed, though mock_agent_with_shadow might do it
+    # mock_agent_with_shadow.shadow_manager.clean_shadows()
+
+
+def test_linting_error_message_propagation(test_args, temp_test_dir, mock_agent_with_shadow):
+    logger.info(f"Running test_linting_error_message_propagation in {temp_test_dir}")
+    test_args.enable_auto_fix_lint = True
+
+    file_path = "lint_error_file.py"
+    content = "print 'hello'" # Python 2 print, will cause lint error in Python 3 env with basic pyflakes
+    tool = WriteToFileTool(path=file_path, content=content)
+
+    mock_issue = MagicMock()
+    mock_issue.severity = MagicMock() # Simulate IssueSeverity enum if needed by _format_lint_issues
+    mock_issue.severity.value = "ERROR" # Assuming _format_lint_issues checks for severity.value
+    mock_issue.position.line = 1
+    mock_issue.position.column = 0
+    mock_issue.message = "SyntaxError: Missing parentheses in call to 'print'"
+    mock_issue.code = "E999"
+
+    mock_lint_result = MagicMock()
+    mock_lint_result.issues = [mock_issue]
+    mock_lint_result.file_results = {
+        mock_agent_with_shadow.shadow_manager.to_shadow_path(os.path.join(temp_test_dir, file_path)): mock_lint_result
+    }
+
+
+    mock_agent_with_shadow.shadow_linter.lint_shadow_file = MagicMock(return_value=mock_lint_result)
+    # Mock _format_lint_issues if it's complex or to control its output precisely
+    # For now, assume it works as expected based on WriteToFileToolResolver's internal call
+
+    resolver = WriteToFileToolResolver(agent=mock_agent_with_shadow, tool=tool, args=test_args)
+
+    # Temporarily patch _format_lint_issues within the resolver instance for this test
+    # to ensure consistent output for assertion.
+    formatted_issue_text = f"文件: {mock_agent_with_shadow.shadow_manager.to_shadow_path(os.path.join(temp_test_dir, file_path))}\n - [错误] 第1行, 第0列: SyntaxError: Missing parentheses in call to 'print' (规则: E999)\n"
+    with patch.object(resolver, '_format_lint_issues', return_value=formatted_issue_text) as mock_format:
+        result = resolver.resolve()
+
+    assert result.success is True # Write itself is successful
+    mock_format.assert_called_once_with(mock_lint_result)
+    assert "代码质量检查发现 1 个问题" in result.message or "Linting found 1 issue(s)" in result.message
+    assert "SyntaxError: Missing parentheses in call to 'print'" in result.message
+
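A side note on the last test above: it pins the formatter's output with unittest.mock.patch.object so the assertion does not depend on the real formatting logic. A self-contained illustration of that pattern, using a hypothetical DemoResolver rather than the package's WriteToFileToolResolver, is sketched below.

    from unittest.mock import MagicMock, patch

    class DemoResolver:
        """Hypothetical stand-in used only to illustrate the mocking pattern in the tests above."""

        def _format_lint_issues(self, lint_result) -> str:
            raise NotImplementedError  # the real formatting is irrelevant to the illustration

        def resolve(self) -> str:
            lint_result = MagicMock(issues=[MagicMock()])  # pretend the linter found one issue
            return f"Linting found {len(lint_result.issues)} issue(s): {self._format_lint_issues(lint_result)}"

    resolver = DemoResolver()
    # patch.object replaces the method on this instance for the duration of the block.
    with patch.object(resolver, "_format_lint_issues", return_value="line 1: SyntaxError") as mock_format:
        message = resolver.resolve()

    assert "Linting found 1 issue(s)" in message
    mock_format.assert_called_once()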