hanzo-mcp 0.6.13__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
Potentially problematic release: this version of hanzo-mcp might be problematic.
- hanzo_mcp/analytics/__init__.py +5 -0
- hanzo_mcp/analytics/posthog_analytics.py +364 -0
- hanzo_mcp/cli.py +3 -3
- hanzo_mcp/cli_enhanced.py +3 -3
- hanzo_mcp/config/settings.py +1 -1
- hanzo_mcp/config/tool_config.py +18 -4
- hanzo_mcp/server.py +34 -1
- hanzo_mcp/tools/__init__.py +65 -2
- hanzo_mcp/tools/agent/__init__.py +84 -3
- hanzo_mcp/tools/agent/agent_tool.py +102 -4
- hanzo_mcp/tools/agent/agent_tool_v2.py +459 -0
- hanzo_mcp/tools/agent/clarification_protocol.py +220 -0
- hanzo_mcp/tools/agent/clarification_tool.py +68 -0
- hanzo_mcp/tools/agent/claude_cli_tool.py +125 -0
- hanzo_mcp/tools/agent/claude_desktop_auth.py +508 -0
- hanzo_mcp/tools/agent/cli_agent_base.py +191 -0
- hanzo_mcp/tools/agent/code_auth.py +436 -0
- hanzo_mcp/tools/agent/code_auth_tool.py +194 -0
- hanzo_mcp/tools/agent/codex_cli_tool.py +123 -0
- hanzo_mcp/tools/agent/critic_tool.py +376 -0
- hanzo_mcp/tools/agent/gemini_cli_tool.py +128 -0
- hanzo_mcp/tools/agent/grok_cli_tool.py +128 -0
- hanzo_mcp/tools/agent/iching_tool.py +380 -0
- hanzo_mcp/tools/agent/network_tool.py +273 -0
- hanzo_mcp/tools/agent/prompt.py +62 -20
- hanzo_mcp/tools/agent/review_tool.py +433 -0
- hanzo_mcp/tools/agent/swarm_tool.py +535 -0
- hanzo_mcp/tools/agent/swarm_tool_v2.py +594 -0
- hanzo_mcp/tools/common/base.py +1 -0
- hanzo_mcp/tools/common/batch_tool.py +102 -10
- hanzo_mcp/tools/common/fastmcp_pagination.py +369 -0
- hanzo_mcp/tools/common/forgiving_edit.py +243 -0
- hanzo_mcp/tools/common/paginated_base.py +230 -0
- hanzo_mcp/tools/common/paginated_response.py +307 -0
- hanzo_mcp/tools/common/pagination.py +226 -0
- hanzo_mcp/tools/common/tool_list.py +3 -0
- hanzo_mcp/tools/common/truncate.py +101 -0
- hanzo_mcp/tools/filesystem/__init__.py +29 -0
- hanzo_mcp/tools/filesystem/ast_multi_edit.py +562 -0
- hanzo_mcp/tools/filesystem/directory_tree_paginated.py +338 -0
- hanzo_mcp/tools/lsp/__init__.py +5 -0
- hanzo_mcp/tools/lsp/lsp_tool.py +512 -0
- hanzo_mcp/tools/memory/__init__.py +76 -0
- hanzo_mcp/tools/memory/knowledge_tools.py +518 -0
- hanzo_mcp/tools/memory/memory_tools.py +456 -0
- hanzo_mcp/tools/search/__init__.py +6 -0
- hanzo_mcp/tools/search/find_tool.py +581 -0
- hanzo_mcp/tools/search/unified_search.py +953 -0
- hanzo_mcp/tools/shell/__init__.py +5 -0
- hanzo_mcp/tools/shell/auto_background.py +203 -0
- hanzo_mcp/tools/shell/base_process.py +53 -27
- hanzo_mcp/tools/shell/bash_tool.py +17 -33
- hanzo_mcp/tools/shell/npx_tool.py +15 -32
- hanzo_mcp/tools/shell/streaming_command.py +594 -0
- hanzo_mcp/tools/shell/uvx_tool.py +15 -32
- hanzo_mcp/types.py +23 -0
- {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.0.dist-info}/METADATA +228 -71
- {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.0.dist-info}/RECORD +61 -24
- hanzo_mcp-0.6.13.dist-info/licenses/LICENSE +0 -21
- {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.0.dist-info}/WHEEL +0 -0
- {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.0.dist-info}/entry_points.txt +0 -0
- {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.0.dist-info}/top_level.txt +0 -0
hanzo_mcp/tools/filesystem/__init__.py

@@ -25,6 +25,13 @@ from hanzo_mcp.tools.filesystem.search_tool import SearchTool
 from hanzo_mcp.tools.filesystem.watch import watch_tool
 from hanzo_mcp.tools.filesystem.diff import create_diff_tool
 
+# Import new search tools
+try:
+    from hanzo_mcp.tools.search import UnifiedSearch, create_unified_search_tool, FindTool, create_find_tool
+    UNIFIED_SEARCH_AVAILABLE = True
+except ImportError:
+    UNIFIED_SEARCH_AVAILABLE = False
+
 # Export all tool classes
 __all__ = [
     "ReadTool",
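The module-level try/except makes the new hanzo_mcp.tools.search package an optional dependency: every later code path checks UNIFIED_SEARCH_AVAILABLE instead of re-attempting the import. A minimal sketch of the pattern as a consumer might use it (the collect_tools function is hypothetical, not part of the package):

```python
# Minimal sketch of the optional-dependency guard used above.
try:
    from hanzo_mcp.tools.search import create_unified_search_tool, create_find_tool
    UNIFIED_SEARCH_AVAILABLE = True
except ImportError:
    UNIFIED_SEARCH_AVAILABLE = False

def collect_tools() -> list:
    """Hypothetical consumer: only touch the optional tools behind the flag."""
    tools = []
    if UNIFIED_SEARCH_AVAILABLE:
        tools.extend([create_unified_search_tool(), create_find_tool()])
    return tools
```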
@@ -74,6 +81,13 @@ def get_read_only_filesystem_tools(
     if project_manager:
         tools.append(SearchTool(permission_manager, project_manager))
 
+    # Add new search tools if available
+    if UNIFIED_SEARCH_AVAILABLE:
+        tools.extend([
+            create_unified_search_tool(),
+            create_find_tool()
+        ])
+
     return tools
 
 
@@ -107,6 +121,13 @@ def get_filesystem_tools(permission_manager: PermissionManager, project_manager=
     if project_manager:
         tools.append(SearchTool(permission_manager, project_manager))
 
+    # Add new search tools if available
+    if UNIFIED_SEARCH_AVAILABLE:
+        tools.extend([
+            create_unified_search_tool(),
+            create_find_tool()
+        ])
+
     return tools
 
 
@@ -150,6 +171,11 @@ def register_filesystem_tools(
         "diff": create_diff_tool,
     }
 
+    # Add new search tools if available
+    if UNIFIED_SEARCH_AVAILABLE:
+        tool_classes["unified_search"] = lambda pm: create_unified_search_tool()
+        tool_classes["find"] = lambda pm: create_find_tool()
+
     tools = []
 
     if enabled_tools:
@@ -163,6 +189,9 @@ def register_filesystem_tools(
             elif tool_name == "watch":
                 # Watch tool is a singleton
                 tools.append(tool_class(permission_manager))
+            elif tool_name in ["unified_search", "find"]:
+                # New search tools are factory functions
+                tools.append(tool_class(permission_manager))
             else:
                 tools.append(tool_class(permission_manager))
         else:
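Both registration hunks lean on the same dispatch trick: tool_classes maps names to callables that all accept the permission manager, so registration can invoke tool_class(permission_manager) uniformly. The two lambdas accept pm only to satisfy that signature and then discard it, so the new search tools are constructed without the permission manager, and the new elif branch performs the same call as the else it sits above. A simplified, self-contained sketch (the stand-in classes are hypothetical):

```python
# Simplified sketch of the registration dispatch; WatchTool and
# create_find_tool here are stand-ins for the real ones.
class WatchTool:
    def __init__(self, permission_manager):
        self.permission_manager = permission_manager

def create_find_tool():
    return object()  # stand-in for a FindTool instance

tool_classes = {
    "watch": WatchTool,                     # class: instantiated with pm
    "find": lambda pm: create_find_tool(),  # factory: pm accepted, then discarded
}

permission_manager = object()
tools = [factory(permission_manager) for factory in tool_classes.values()]
print(len(tools))  # 2
```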
hanzo_mcp/tools/filesystem/ast_multi_edit.py (new file, +562 lines)

@@ -0,0 +1,562 @@
+"""AST-aware multi-edit tool using treesitter for accurate code modifications."""
+
+import os
+import json
+from typing import List, Dict, Any, Optional, Tuple, Set
+from pathlib import Path
+from dataclasses import dataclass
+from collections import defaultdict
+
+from hanzo_mcp.tools.common.base import BaseTool
+from hanzo_mcp.tools.common.decorators import with_context_normalization
+from hanzo_mcp.tools.common.paginated_response import AutoPaginatedResponse
+from hanzo_mcp.types import MCPResourceDocument
+
+try:
+    import tree_sitter
+    import tree_sitter_python
+    import tree_sitter_javascript
+    import tree_sitter_typescript
+    import tree_sitter_go
+    import tree_sitter_rust
+    import tree_sitter_java
+    import tree_sitter_cpp
+    TREESITTER_AVAILABLE = True
+except ImportError:
+    TREESITTER_AVAILABLE = False
+
+
+@dataclass
+class ASTMatch:
+    """Represents an AST match with context."""
+    file_path: str
+    line_start: int
+    line_end: int
+    column_start: int
+    column_end: int
+    node_type: str
+    text: str
+    parent_context: Optional[str] = None
+    semantic_context: Optional[str] = None
+
+
+@dataclass
+class EditOperation:
+    """Enhanced edit operation with AST awareness."""
+    old_string: str
+    new_string: str
+    node_types: Optional[List[str]] = None  # Restrict to specific AST node types
+    semantic_match: bool = False  # Use semantic matching
+    expect_count: Optional[int] = None  # Expected number of matches
+    context_lines: int = 5  # Lines of context for uniqueness
+
+
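EditOperation mirrors the JSON-ish dicts that run() receives over MCP, which is also why the docstring examples just below use the JSON literal true. An illustrative edits payload with every optional field populated (values are illustrative):

```python
# The `edits` payload that run() converts into EditOperation objects;
# keys mirror the dataclass fields above.
edits = [
    {
        "old_string": "StopTracking",
        "new_string": "StopTrackingWithContext",
        "node_types": ["call_expression"],  # only touch call sites
        "semantic_match": True,             # follow references when enabled
        "expect_count": 3,                  # skip this edit if the match count differs
        "context_lines": 5,
    }
]
```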
+class ASTMultiEdit(BaseTool):
+    """Multi-edit tool with AST awareness and automatic reference finding."""
+
+    name = "ast_multi_edit"
+    description = """Enhanced multi-edit with AST awareness and reference finding.
+
+    Features:
+    - AST-based search for accurate matches
+    - Automatic reference finding across codebase
+    - Semantic matching (find all usages of a symbol)
+    - Result pagination to avoid token limits
+    - Context-aware replacements
+
+    Examples:
+    1. Rename a function and all its calls:
+       ast_multi_edit("file.py", [
+           {"old_string": "oldFunc", "new_string": "newFunc", "semantic_match": true}
+       ])
+
+    2. Update specific node types only:
+       ast_multi_edit("file.go", [
+           {"old_string": "StopTracking", "new_string": "StopTrackingWithContext",
+            "node_types": ["call_expression"]}
+       ])
+    """
+
+    def __init__(self):
+        super().__init__()
+        self.parsers = {}
+        self.languages = {}
+
+        if TREESITTER_AVAILABLE:
+            self._init_parsers()
+
+    def _init_parsers(self):
+        """Initialize treesitter parsers for supported languages."""
+        language_mapping = {
+            '.py': (tree_sitter_python, 'python'),
+            '.js': (tree_sitter_javascript, 'javascript'),
+            '.jsx': (tree_sitter_javascript, 'javascript'),
+            '.ts': (tree_sitter_typescript.typescript, 'typescript'),
+            '.tsx': (tree_sitter_typescript.tsx, 'tsx'),
+            '.go': (tree_sitter_go, 'go'),
+            '.rs': (tree_sitter_rust, 'rust'),
+            '.java': (tree_sitter_java, 'java'),
+            '.cpp': (tree_sitter_cpp, 'cpp'),
+            '.cc': (tree_sitter_cpp, 'cpp'),
+            '.cxx': (tree_sitter_cpp, 'cpp'),
+            '.h': (tree_sitter_cpp, 'cpp'),
+            '.hpp': (tree_sitter_cpp, 'cpp'),
+        }
+
+        for ext, (module, name) in language_mapping.items():
+            try:
+                parser = tree_sitter.Parser()
+                if hasattr(module, 'language'):
+                    parser.set_language(module.language())
+                else:
+                    # For older tree-sitter bindings
+                    lang = tree_sitter.Language(module.language(), name)
+                    parser.set_language(lang)
+                self.parsers[ext] = parser
+                self.languages[ext] = name
+            except Exception as e:
+                print(f"Failed to initialize parser for {ext}: {e}")
+
+    def _get_parser(self, file_path: str) -> Optional[tree_sitter.Parser]:
+        """Get parser for file type."""
+        ext = Path(file_path).suffix.lower()
+        return self.parsers.get(ext)
+
+    def _parse_file(self, file_path: str, content: str) -> Optional[tree_sitter.Tree]:
+        """Parse file content into AST."""
+        parser = self._get_parser(file_path)
+        if not parser:
+            return None
+
+        return parser.parse(bytes(content, 'utf-8'))
+
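_init_parsers has to cope with API drift across py-tree-sitter releases, hence the hasattr branch. For contrast, a hedged sketch of how initialization typically looks on recent bindings (roughly 0.22 and later); this is an assumption about the ecosystem, not code from the package:

```python
# Hedged sketch: typical setup on recent py-tree-sitter bindings (~0.22+).
# APIs have shifted between releases, so treat this as an assumption.
import tree_sitter_python
from tree_sitter import Language, Parser

PY_LANGUAGE = Language(tree_sitter_python.language())  # wrap the raw grammar pointer
parser = Parser(PY_LANGUAGE)                           # newer bindings take it in the constructor
tree = parser.parse(b"def f():\n    return 1\n")
print(tree.root_node.type)  # "module"
```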
+    def _find_references(self,
+                         symbol: str,
+                         file_path: str,
+                         project_root: Optional[str] = None) -> List[ASTMatch]:
+        """Find all references to a symbol across the project."""
+        matches = []
+
+        if not project_root:
+            project_root = self._find_project_root(file_path)
+
+        # Get language-specific reference patterns
+        patterns = self._get_reference_patterns(symbol, file_path)
+
+        # Search across all relevant files
+        for pattern in patterns:
+            # Use grep_ast tool for efficient AST-aware search
+            results = self._search_with_ast(pattern, project_root)
+            matches.extend(results)
+
+        return matches
+
+    def _get_reference_patterns(self, symbol: str, file_path: str) -> List[Dict[str, Any]]:
+        """Get language-specific patterns for finding references."""
+        ext = Path(file_path).suffix.lower()
+        lang = self.languages.get(ext, 'generic')
+
+        patterns = []
+
+        if lang == 'go':
+            # Go specific patterns
+            patterns.extend([
+                # Function calls
+                {"query": f"(call_expression function: (identifier) @func (#eq? @func \"{symbol}\"))", "type": "call"},
+                # Method calls
+                {"query": f"(call_expression function: (selector_expression field: (field_identifier) @method (#eq? @method \"{symbol}\")))", "type": "method_call"},
+                # Function declarations
+                {"query": f"(function_declaration name: (identifier) @name (#eq? @name \"{symbol}\"))", "type": "declaration"},
+                # Type references
+                {"query": f"(type_identifier) @type (#eq? @type \"{symbol}\")", "type": "type_ref"},
+            ])
+        elif lang in ['javascript', 'typescript', 'tsx']:
+            patterns.extend([
+                # Function calls
+                {"query": f"(call_expression function: (identifier) @func (#eq? @func \"{symbol}\"))", "type": "call"},
+                # Method calls
+                {"query": f"(call_expression function: (member_expression property: (property_identifier) @prop (#eq? @prop \"{symbol}\")))", "type": "method_call"},
+                # Function declarations
+                {"query": f"(function_declaration name: (identifier) @name (#eq? @name \"{symbol}\"))", "type": "declaration"},
+                # Variable declarations
+                {"query": f"(variable_declarator name: (identifier) @var (#eq? @var \"{symbol}\"))", "type": "variable"},
+            ])
+        elif lang == 'python':
+            patterns.extend([
+                # Function calls
+                {"query": f"(call function: (identifier) @func (#eq? @func \"{symbol}\"))", "type": "call"},
+                # Method calls
+                {"query": f"(call function: (attribute attribute: (identifier) @attr (#eq? @attr \"{symbol}\")))", "type": "method_call"},
+                # Function definitions
+                {"query": f"(function_definition name: (identifier) @name (#eq? @name \"{symbol}\"))", "type": "declaration"},
+                # Class definitions
+                {"query": f"(class_definition name: (identifier) @name (#eq? @name \"{symbol}\"))", "type": "class"},
+            ])
+        else:
+            # Generic patterns
+            patterns.append({"query": symbol, "type": "text"})
+
+        return patterns
+
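The pattern builders only interpolate the symbol into tree-sitter S-expression queries. Expanding the first Go entry for a concrete symbol makes the output shape visible (values reproduced from the f-strings above; the path is illustrative):

```python
# First Go entry from _get_reference_patterns("StopTracking", "pkg/tracker.go"),
# with the f-string expanded.
pattern = {
    "query": '(call_expression function: (identifier) @func (#eq? @func "StopTracking"))',
    "type": "call",
}
```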
+    def _search_with_ast(self, pattern: Dict[str, Any], root: str) -> List[ASTMatch]:
+        """Search using AST patterns."""
+        matches = []
+
+        # This would integrate with grep_ast tool
+        # For now, simulate the search
+        import glob
+
+        for file_path in glob.glob(f"{root}/**/*.*", recursive=True):
+            if self._should_skip_file(file_path):
+                continue
+
+            try:
+                with open(file_path, 'r', encoding='utf-8') as f:
+                    content = f.read()
+
+                tree = self._parse_file(file_path, content)
+                if tree and pattern["type"] != "text":
+                    # Use treesitter query
+                    matches.extend(self._query_ast(tree, pattern, file_path, content))
+                else:
+                    # Fallback to text search
+                    matches.extend(self._text_search(content, pattern["query"], file_path))
+
+            except Exception:
+                continue
+
+        return matches
+
+    def _query_ast(self,
+                   tree: tree_sitter.Tree,
+                   pattern: Dict[str, Any],
+                   file_path: str,
+                   content: str) -> List[ASTMatch]:
+        """Query AST with treesitter pattern."""
+        matches = []
+
+        try:
+            # Get language for query
+            lang_name = self.languages.get(Path(file_path).suffix.lower())
+            if not lang_name:
+                return matches
+
+            # Execute query
+            query = tree_sitter.Query(pattern["query"], lang_name)
+            captures = query.captures(tree.root_node)
+
+            lines = content.split('\n')
+
+            for node, name in captures:
+                match = ASTMatch(
+                    file_path=file_path,
+                    line_start=node.start_point[0] + 1,
+                    line_end=node.end_point[0] + 1,
+                    column_start=node.start_point[1],
+                    column_end=node.end_point[1],
+                    node_type=node.type,
+                    text=content[node.start_byte:node.end_byte],
+                    parent_context=self._get_parent_context(node, content),
+                    semantic_context=pattern["type"]
+                )
+                matches.append(match)
+
+        except Exception as e:
+            # Fallback to simple search
+            pass
+
+        return matches
+
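A caveat for readers: tree_sitter.Query is conventionally constructed as Query(language, source), taking a Language object rather than a language name string, so passing (pattern["query"], lang_name) would likely raise and fall into the broad except, silently returning no matches. A hedged sketch of the conventional flow this method appears to target (an assumption about intent, not the shipped code):

```python
# Hedged sketch of the usual py-tree-sitter query flow; assumes ~0.22-era
# bindings where captures() yields (node, capture_name) tuples
# (newer releases return a dict keyed by capture name instead).
import tree_sitter_python
from tree_sitter import Language, Parser

language = Language(tree_sitter_python.language())
tree = Parser(language).parse(b"def stop_tracking():\n    pass\n")

query = language.query("(function_definition name: (identifier) @name)")
for node, capture_name in query.captures(tree.root_node):
    print(capture_name, node.text)  # name b'stop_tracking'
```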
+    def _get_parent_context(self, node: tree_sitter.Node, content: str) -> Optional[str]:
+        """Get parent context for better understanding."""
+        parent = node.parent
+        if parent:
+            # Get parent function/class name
+            if parent.type in ['function_declaration', 'function_definition', 'method_definition']:
+                for child in parent.children:
+                    if child.type == 'identifier':
+                        return f"function: {content[child.start_byte:child.end_byte]}"
+            elif parent.type in ['class_declaration', 'class_definition']:
+                for child in parent.children:
+                    if child.type == 'identifier':
+                        return f"class: {content[child.start_byte:child.end_byte]}"
+
+        return None
+
+    def _text_search(self, content: str, pattern: str, file_path: str) -> List[ASTMatch]:
+        """Fallback text search."""
+        matches = []
+        lines = content.split('\n')
+
+        for i, line in enumerate(lines):
+            if pattern in line:
+                col = line.find(pattern)
+                match = ASTMatch(
+                    file_path=file_path,
+                    line_start=i + 1,
+                    line_end=i + 1,
+                    column_start=col,
+                    column_end=col + len(pattern),
+                    node_type='text',
+                    text=pattern,
+                    semantic_context='text_match'
+                )
+                matches.append(match)
+
+        return matches
+
+    def _should_skip_file(self, file_path: str) -> bool:
+        """Check if file should be skipped."""
+        skip_dirs = {'.git', 'node_modules', '__pycache__', '.pytest_cache', 'venv', '.env'}
+        skip_extensions = {'.pyc', '.pyo', '.so', '.dylib', '.dll', '.exe'}
+
+        path = Path(file_path)
+
+        # Check directories
+        for part in path.parts:
+            if part in skip_dirs:
+                return True
+
+        # Check extensions
+        if path.suffix in skip_extensions:
+            return True
+
+        # Check if binary
+        try:
+            with open(file_path, 'rb') as f:
+                chunk = f.read(512)
+                if b'\0' in chunk:
+                    return True
+        except:
+            return True
+
+        return False
+
+    def _find_project_root(self, file_path: str) -> str:
+        """Find project root by looking for markers."""
+        markers = {'.git', 'package.json', 'go.mod', 'Cargo.toml', 'pyproject.toml', 'setup.py'}
+
+        path = Path(file_path).resolve()
+        for parent in path.parents:
+            for marker in markers:
+                if (parent / marker).exists():
+                    return str(parent)
+
+        return str(path.parent)
+
+    def _group_matches_by_file(self, matches: List[ASTMatch]) -> Dict[str, List[ASTMatch]]:
+        """Group matches by file for efficient editing."""
+        grouped = defaultdict(list)
+        for match in matches:
+            grouped[match.file_path].append(match)
+        return grouped
+
+    def _create_unique_context(self,
+                               content: str,
+                               match: ASTMatch,
+                               context_lines: int) -> str:
+        """Create unique context for edit identification."""
+        lines = content.split('\n')
+
+        start_line = max(0, match.line_start - context_lines - 1)
+        end_line = min(len(lines), match.line_end + context_lines)
+
+        context_lines = lines[start_line:end_line]
+        return '\n'.join(context_lines)
+
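These helpers are conventional: _find_project_root walks parent directories until it sees a repo marker, and _create_unique_context slices a window of lines around a match (note that it reassigns its integer context_lines parameter to that list of lines, which works but makes the name do double duty). A stand-alone rendition of the marker walk, with an abbreviated marker set and hypothetical paths:

```python
from pathlib import Path

# Stand-alone rendition of the marker walk in _find_project_root.
def find_project_root(file_path: str, markers=(".git", "go.mod", "pyproject.toml")) -> str:
    path = Path(file_path).resolve()
    for parent in path.parents:
        if any((parent / marker).exists() for marker in markers):
            return str(parent)
    return str(path.parent)  # no marker found: fall back to the file's directory

print(find_project_root(__file__))
```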
+    async def run(self,
+                  file_path: str,
+                  edits: List[Dict[str, Any]],
+                  find_references: bool = False,
+                  page_size: int = 50,
+                  preview_only: bool = False,
+                  **kwargs) -> MCPResourceDocument:
+        """Execute AST-aware multi-edit operation.
+
+        Args:
+            file_path: Primary file to edit
+            edits: List of edit operations
+            find_references: Whether to find and edit references across codebase
+            page_size: Number of results per page
+            preview_only: Show what would be changed without applying
+        """
+
+        if not TREESITTER_AVAILABLE:
+            return self._fallback_to_basic_edit(file_path, edits)
+
+        results = {
+            "primary_file": file_path,
+            "edits_requested": len(edits),
+            "files_analyzed": 0,
+            "matches_found": 0,
+            "edits_applied": 0,
+            "errors": [],
+            "changes": []
+        }
+
+        # Convert edits to EditOperation objects
+        edit_ops = []
+        for edit in edits:
+            edit_ops.append(EditOperation(
+                old_string=edit["old_string"],
+                new_string=edit["new_string"],
+                node_types=edit.get("node_types"),
+                semantic_match=edit.get("semantic_match", False),
+                expect_count=edit.get("expect_count"),
+                context_lines=edit.get("context_lines", 5)
+            ))
+
+        # Find all matches
+        all_matches = []
+
+        # First, analyze primary file
+        try:
+            with open(file_path, 'r', encoding='utf-8') as f:
+                content = f.read()
+
+            tree = self._parse_file(file_path, content)
+
+            for edit_op in edit_ops:
+                if edit_op.semantic_match and find_references:
+                    # Find all references across codebase
+                    matches = self._find_references(edit_op.old_string, file_path)
+                else:
+                    # Just search in current file
+                    if tree:
+                        pattern = {"query": edit_op.old_string, "type": "text"}
+                        matches = self._query_ast(tree, pattern, file_path, content)
+                    else:
+                        matches = self._text_search(content, edit_op.old_string, file_path)
+
+                # Filter by node types if specified
+                if edit_op.node_types:
+                    matches = [m for m in matches if m.node_type in edit_op.node_types]
+
+                # Check expected count
+                if edit_op.expect_count is not None and len(matches) != edit_op.expect_count:
+                    results["errors"].append({
+                        "edit": edit_op.old_string,
+                        "expected": edit_op.expect_count,
+                        "found": len(matches),
+                        "locations": [f"{m.file_path}:{m.line_start}" for m in matches[:5]]
+                    })
+                    continue
+
+                all_matches.extend([(edit_op, match) for match in matches])
+
+        except Exception as e:
+            results["errors"].append({
+                "file": file_path,
+                "error": str(e)
+            })
+            return MCPResourceDocument(data=results)
+
+        results["matches_found"] = len(all_matches)
+        results["files_analyzed"] = len(set(m[1].file_path for m in all_matches))
+
+        if preview_only:
+            # Return preview of changes
+            preview = self._generate_preview(all_matches, page_size)
+            results["preview"] = preview
+            return MCPResourceDocument(data=results)
+
+        # Apply edits
+        changes_by_file = self._group_changes(all_matches)
+
+        for file_path, changes in changes_by_file.items():
+            try:
+                success = await self._apply_file_changes(file_path, changes)
+                if success:
+                    results["edits_applied"] += len(changes)
+                    results["changes"].append({
+                        "file": file_path,
+                        "edits": len(changes)
+                    })
+            except Exception as e:
+                results["errors"].append({
+                    "file": file_path,
+                    "error": str(e)
+                })
+
+        return MCPResourceDocument(data=results)
+
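run() is the whole pipeline: parse or fall back, collect (EditOperation, ASTMatch) pairs, enforce expect_count, then either preview or apply the grouped changes. A hypothetical direct invocation; in practice the MCP server drives this, and the argument values here are illustrative:

```python
import asyncio
from hanzo_mcp.tools.filesystem.ast_multi_edit import ASTMultiEdit

async def main():
    tool = ASTMultiEdit()
    doc = await tool.run(
        file_path="pkg/tracker.go",
        edits=[{
            "old_string": "StopTracking",
            "new_string": "StopTrackingWithContext",
            "node_types": ["call_expression"],
            "semantic_match": True,
        }],
        find_references=True,
        preview_only=True,  # report matches without writing any files
    )
    print(doc)

asyncio.run(main())
```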
+    def _group_changes(self, matches: List[Tuple[EditOperation, ASTMatch]]) -> Dict[str, List[Tuple[EditOperation, ASTMatch]]]:
+        """Group changes by file."""
+        grouped = defaultdict(list)
+        for edit_op, match in matches:
+            grouped[match.file_path].append((edit_op, match))
+        return grouped
+
+    async def _apply_file_changes(self,
+                                  file_path: str,
+                                  changes: List[Tuple[EditOperation, ASTMatch]]) -> bool:
+        """Apply changes to a single file."""
+        with open(file_path, 'r', encoding='utf-8') as f:
+            content = f.read()
+
+        # Sort changes by position (reverse order to maintain positions)
+        changes.sort(key=lambda x: (x[1].line_start, x[1].column_start), reverse=True)
+
+        lines = content.split('\n')
+
+        for edit_op, match in changes:
+            # Create unique context for this match
+            context = self._create_unique_context(content, match, edit_op.context_lines)
+
+            # Apply the edit
+            if match.line_start == match.line_end:
+                # Single line edit
+                line = lines[match.line_start - 1]
+                before = line[:match.column_start]
+                after = line[match.column_end:]
+                lines[match.line_start - 1] = before + edit_op.new_string + after
+            else:
+                # Multi-line edit
+                # Remove old lines
+                del lines[match.line_start - 1:match.line_end]
+                # Insert new content
+                lines.insert(match.line_start - 1, edit_op.new_string)
+
+        # Write back
+        with open(file_path, 'w', encoding='utf-8') as f:
+            f.write('\n'.join(lines))
+
+        return True
+
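_apply_file_changes sorts matches in reverse document order before rewriting, because an earlier replacement would invalidate the recorded line and column positions of every later match. A tiny demonstration of the invariant:

```python
# Why reverse document order matters: replacing an earlier span first would
# shift the recorded offsets of every later match.
line = "foo(a); foo(b)"
matches = [(0, 3), (8, 11)]  # (start, end) spans of "foo"
for start, end in sorted(matches, reverse=True):  # right-to-left
    line = line[:start] + "bar" + line[end:]
print(line)  # bar(a); bar(b) -- earlier offsets stayed valid
```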
+    def _generate_preview(self,
+                          matches: List[Tuple[EditOperation, ASTMatch]],
+                          page_size: int) -> List[Dict[str, Any]]:
+        """Generate preview of changes."""
+        preview = []
+
+        for i, (edit_op, match) in enumerate(matches[:page_size]):
+            preview.append({
+                "file": match.file_path,
+                "line": match.line_start,
+                "column": match.column_start,
+                "node_type": match.node_type,
+                "context": match.parent_context,
+                "old": edit_op.old_string,
+                "new": edit_op.new_string,
+                "semantic_type": match.semantic_context
+            })
+
+        if len(matches) > page_size:
+            preview.append({
+                "note": f"... and {len(matches) - page_size} more matches"
+            })
+
+        return preview
+
+    def _fallback_to_basic_edit(self, file_path: str, edits: List[Dict[str, Any]]) -> MCPResourceDocument:
+        """Fallback to basic multi-edit when treesitter not available."""
+        # Delegate to existing multi_edit tool
+        from hanzo_mcp.tools.filesystem.multi_edit import MultiEdit
+        basic_tool = MultiEdit()
+        return basic_tool.run(file_path, edits)
+
+
+# Tool registration
+def create_ast_multi_edit_tool():
+    """Factory function to create AST multi-edit tool."""
+    return ASTMultiEdit()
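One detail worth flagging: the treesitter-unavailable path returns basic_tool.run(file_path, edits) without awaiting it. Whether MultiEdit.run is a coroutine is not visible in this diff, but if it is, callers would receive an un-awaited coroutine instead of an MCPResourceDocument. A defensive sketch (an assumption, not the shipped code):

```python
# Defensive sketch: await the fallback result only if MultiEdit.run
# turns out to be a coroutine (not verifiable from this diff alone).
import inspect

async def fallback_to_basic_edit(basic_tool, file_path, edits):
    result = basic_tool.run(file_path, edits)
    if inspect.isawaitable(result):
        result = await result
    return result
```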