hanzo-mcp 0.5.1__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of hanzo-mcp might be problematic.

Files changed (118)
  1. hanzo_mcp/__init__.py +1 -1
  2. hanzo_mcp/cli.py +32 -0
  3. hanzo_mcp/dev_server.py +246 -0
  4. hanzo_mcp/prompts/__init__.py +1 -1
  5. hanzo_mcp/prompts/project_system.py +43 -7
  6. hanzo_mcp/server.py +5 -1
  7. hanzo_mcp/tools/__init__.py +168 -6
  8. hanzo_mcp/tools/agent/__init__.py +1 -1
  9. hanzo_mcp/tools/agent/agent.py +401 -0
  10. hanzo_mcp/tools/agent/agent_tool.py +3 -4
  11. hanzo_mcp/tools/common/__init__.py +1 -1
  12. hanzo_mcp/tools/common/base.py +9 -4
  13. hanzo_mcp/tools/common/batch_tool.py +3 -5
  14. hanzo_mcp/tools/common/config_tool.py +1 -1
  15. hanzo_mcp/tools/common/context.py +1 -1
  16. hanzo_mcp/tools/common/palette.py +344 -0
  17. hanzo_mcp/tools/common/palette_loader.py +108 -0
  18. hanzo_mcp/tools/common/stats.py +261 -0
  19. hanzo_mcp/tools/common/thinking_tool.py +3 -5
  20. hanzo_mcp/tools/common/tool_disable.py +144 -0
  21. hanzo_mcp/tools/common/tool_enable.py +182 -0
  22. hanzo_mcp/tools/common/tool_list.py +260 -0
  23. hanzo_mcp/tools/config/__init__.py +10 -0
  24. hanzo_mcp/tools/config/config_tool.py +212 -0
  25. hanzo_mcp/tools/config/index_config.py +176 -0
  26. hanzo_mcp/tools/config/palette_tool.py +166 -0
  27. hanzo_mcp/tools/database/__init__.py +71 -0
  28. hanzo_mcp/tools/database/database_manager.py +246 -0
  29. hanzo_mcp/tools/database/graph.py +482 -0
  30. hanzo_mcp/tools/database/graph_add.py +257 -0
  31. hanzo_mcp/tools/database/graph_query.py +536 -0
  32. hanzo_mcp/tools/database/graph_remove.py +267 -0
  33. hanzo_mcp/tools/database/graph_search.py +348 -0
  34. hanzo_mcp/tools/database/graph_stats.py +345 -0
  35. hanzo_mcp/tools/database/sql.py +411 -0
  36. hanzo_mcp/tools/database/sql_query.py +229 -0
  37. hanzo_mcp/tools/database/sql_search.py +296 -0
  38. hanzo_mcp/tools/database/sql_stats.py +254 -0
  39. hanzo_mcp/tools/editor/__init__.py +11 -0
  40. hanzo_mcp/tools/editor/neovim_command.py +272 -0
  41. hanzo_mcp/tools/editor/neovim_edit.py +290 -0
  42. hanzo_mcp/tools/editor/neovim_session.py +356 -0
  43. hanzo_mcp/tools/filesystem/__init__.py +52 -13
  44. hanzo_mcp/tools/filesystem/base.py +1 -1
  45. hanzo_mcp/tools/filesystem/batch_search.py +812 -0
  46. hanzo_mcp/tools/filesystem/content_replace.py +3 -5
  47. hanzo_mcp/tools/filesystem/diff.py +193 -0
  48. hanzo_mcp/tools/filesystem/directory_tree.py +3 -5
  49. hanzo_mcp/tools/filesystem/edit.py +3 -5
  50. hanzo_mcp/tools/filesystem/find.py +443 -0
  51. hanzo_mcp/tools/filesystem/find_files.py +348 -0
  52. hanzo_mcp/tools/filesystem/git_search.py +505 -0
  53. hanzo_mcp/tools/filesystem/grep.py +2 -2
  54. hanzo_mcp/tools/filesystem/multi_edit.py +3 -5
  55. hanzo_mcp/tools/filesystem/read.py +17 -5
  56. hanzo_mcp/tools/filesystem/{grep_ast_tool.py → symbols.py} +17 -27
  57. hanzo_mcp/tools/filesystem/symbols_unified.py +376 -0
  58. hanzo_mcp/tools/filesystem/tree.py +268 -0
  59. hanzo_mcp/tools/filesystem/unified_search.py +465 -443
  60. hanzo_mcp/tools/filesystem/unix_aliases.py +99 -0
  61. hanzo_mcp/tools/filesystem/watch.py +174 -0
  62. hanzo_mcp/tools/filesystem/write.py +3 -5
  63. hanzo_mcp/tools/jupyter/__init__.py +9 -12
  64. hanzo_mcp/tools/jupyter/base.py +1 -1
  65. hanzo_mcp/tools/jupyter/jupyter.py +326 -0
  66. hanzo_mcp/tools/jupyter/notebook_edit.py +3 -4
  67. hanzo_mcp/tools/jupyter/notebook_read.py +3 -5
  68. hanzo_mcp/tools/llm/__init__.py +31 -0
  69. hanzo_mcp/tools/llm/consensus_tool.py +351 -0
  70. hanzo_mcp/tools/llm/llm_manage.py +413 -0
  71. hanzo_mcp/tools/llm/llm_tool.py +346 -0
  72. hanzo_mcp/tools/llm/llm_unified.py +851 -0
  73. hanzo_mcp/tools/llm/provider_tools.py +412 -0
  74. hanzo_mcp/tools/mcp/__init__.py +15 -0
  75. hanzo_mcp/tools/mcp/mcp_add.py +263 -0
  76. hanzo_mcp/tools/mcp/mcp_remove.py +127 -0
  77. hanzo_mcp/tools/mcp/mcp_stats.py +165 -0
  78. hanzo_mcp/tools/mcp/mcp_unified.py +503 -0
  79. hanzo_mcp/tools/shell/__init__.py +21 -23
  80. hanzo_mcp/tools/shell/base.py +1 -1
  81. hanzo_mcp/tools/shell/base_process.py +303 -0
  82. hanzo_mcp/tools/shell/bash_unified.py +134 -0
  83. hanzo_mcp/tools/shell/logs.py +265 -0
  84. hanzo_mcp/tools/shell/npx.py +194 -0
  85. hanzo_mcp/tools/shell/npx_background.py +254 -0
  86. hanzo_mcp/tools/shell/npx_unified.py +101 -0
  87. hanzo_mcp/tools/shell/open.py +107 -0
  88. hanzo_mcp/tools/shell/pkill.py +262 -0
  89. hanzo_mcp/tools/shell/process_unified.py +131 -0
  90. hanzo_mcp/tools/shell/processes.py +279 -0
  91. hanzo_mcp/tools/shell/run_background.py +326 -0
  92. hanzo_mcp/tools/shell/run_command.py +3 -4
  93. hanzo_mcp/tools/shell/run_command_windows.py +3 -4
  94. hanzo_mcp/tools/shell/uvx.py +187 -0
  95. hanzo_mcp/tools/shell/uvx_background.py +249 -0
  96. hanzo_mcp/tools/shell/uvx_unified.py +101 -0
  97. hanzo_mcp/tools/todo/__init__.py +1 -1
  98. hanzo_mcp/tools/todo/base.py +1 -1
  99. hanzo_mcp/tools/todo/todo.py +265 -0
  100. hanzo_mcp/tools/todo/todo_read.py +3 -5
  101. hanzo_mcp/tools/todo/todo_write.py +3 -5
  102. hanzo_mcp/tools/vector/__init__.py +6 -1
  103. hanzo_mcp/tools/vector/git_ingester.py +3 -0
  104. hanzo_mcp/tools/vector/index_tool.py +358 -0
  105. hanzo_mcp/tools/vector/infinity_store.py +98 -0
  106. hanzo_mcp/tools/vector/project_manager.py +27 -5
  107. hanzo_mcp/tools/vector/vector.py +311 -0
  108. hanzo_mcp/tools/vector/vector_index.py +1 -1
  109. hanzo_mcp/tools/vector/vector_search.py +12 -7
  110. hanzo_mcp-0.6.1.dist-info/METADATA +336 -0
  111. hanzo_mcp-0.6.1.dist-info/RECORD +134 -0
  112. hanzo_mcp-0.6.1.dist-info/entry_points.txt +3 -0
  113. hanzo_mcp-0.5.1.dist-info/METADATA +0 -276
  114. hanzo_mcp-0.5.1.dist-info/RECORD +0 -68
  115. hanzo_mcp-0.5.1.dist-info/entry_points.txt +0 -2
  116. {hanzo_mcp-0.5.1.dist-info → hanzo_mcp-0.6.1.dist-info}/WHEEL +0 -0
  117. {hanzo_mcp-0.5.1.dist-info → hanzo_mcp-0.6.1.dist-info}/licenses/LICENSE +0 -0
  118. {hanzo_mcp-0.5.1.dist-info → hanzo_mcp-0.6.1.dist-info}/top_level.txt +0 -0
@@ -1,30 +1,31 @@
- """Unified search tool that combines grep, vector, AST, and semantic search.
+ """Unified search tool that runs multiple search types in parallel.

- This tool provides an intelligent multi-search approach that:
- 1. Always starts with fast grep/regex search
- 2. Enhances with vector similarity, AST context, and symbol search
- 3. Returns comprehensive results with function/method context
- 4. Optimizes performance through intelligent caching and batching
+ This tool consolidates all search capabilities and runs them concurrently:
+ - grep: Fast pattern/regex search using ripgrep
+ - grep_ast: AST-aware code search with structural context
+ - vector_search: Semantic similarity search
+ - git_search: Search through git history
+ - symbol_search: Find symbols (functions, classes) in code
+
+ Results are combined, deduplicated, and ranked for comprehensive search coverage.
  """

  import asyncio
- import json
  import re
- from dataclasses import dataclass, asdict
+ from dataclasses import dataclass
  from pathlib import Path
- from typing import Dict, List, Optional, Set, Tuple, Any, Union
+ from typing import Annotated, Dict, List, Optional, Set, Tuple, TypedDict, Unpack, final, override
  from enum import Enum

- from fastmcp import Context as MCPContext
- from fastmcp import FastMCP
+ from mcp.server.fastmcp import Context as MCPContext
+ from mcp.server import FastMCP
  from pydantic import Field
- from typing_extensions import Annotated, TypedDict, Unpack, final, override

  from hanzo_mcp.tools.filesystem.base import FilesystemBaseTool
  from hanzo_mcp.tools.filesystem.grep import Grep
- from hanzo_mcp.tools.filesystem.grep_ast_tool import GrepAstTool
+ from hanzo_mcp.tools.filesystem.symbols import SymbolsTool
+ from hanzo_mcp.tools.filesystem.git_search import GitSearchTool
  from hanzo_mcp.tools.vector.vector_search import VectorSearchTool
- from hanzo_mcp.tools.vector.ast_analyzer import ASTAnalyzer, Symbol
  from hanzo_mcp.tools.common.permissions import PermissionManager
  from hanzo_mcp.tools.vector.project_manager import ProjectVectorManager

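The new module docstring describes a fan-out design: each enabled search type runs as its own coroutine and the outputs are merged afterwards. A minimal sketch of that concurrency pattern, with hypothetical stand-in coroutines in place of the package's real tools:

    import asyncio

    # Hypothetical stand-ins for the real search backends.
    async def grep_search(pattern: str) -> list[str]:
        return [f"grep hit for {pattern}"]

    async def vector_search(pattern: str) -> list[str]:
        return [f"vector hit for {pattern}"]

    async def unified(pattern: str) -> list[str]:
        # Run every backend concurrently; with return_exceptions=True a
        # failure comes back as a value instead of cancelling the others.
        outcomes = await asyncio.gather(
            grep_search(pattern),
            vector_search(pattern),
            return_exceptions=True,
        )
        merged: list[str] = []
        for outcome in outcomes:
            if not isinstance(outcome, Exception):
                merged.extend(outcome)
        return merged

    print(asyncio.run(unified("TODO")))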
@@ -32,60 +33,103 @@ from hanzo_mcp.tools.vector.project_manager import ProjectVectorManager
  class SearchType(Enum):
  """Types of searches that can be performed."""
  GREP = "grep"
- VECTOR = "vector"
- AST = "ast"
- SYMBOL = "symbol"
+ GREP_AST = "grep_ast"
+ VECTOR = "vector"
+ GIT = "git"
+ SYMBOL = "symbol" # Searches for function/class definitions


- @dataclass
+ @dataclass
  class SearchResult:
- """Unified search result combining different search types."""
+ """Unified search result from any search type."""
  file_path: str
  line_number: Optional[int]
  content: str
  search_type: SearchType
  score: float # Relevance score (0-1)
- context: Optional[str] = None # AST/function context
- symbol_info: Optional[Symbol] = None
- project: Optional[str] = None
-
- def to_dict(self) -> Dict[str, Any]:
- """Convert to dictionary for JSON serialization."""
- result = asdict(self)
- result['search_type'] = self.search_type.value
- if self.symbol_info:
- result['symbol_info'] = asdict(self.symbol_info)
- return result
+ context: Optional[str] = None # Function/class context
+ match_count: int = 1 # Number of matches in this location


- @dataclass
- class UnifiedSearchResults:
- """Container for all unified search results."""
- query: str
- total_results: int
- results_by_type: Dict[SearchType, List[SearchResult]]
- combined_results: List[SearchResult]
- search_time_ms: float
-
- def to_dict(self) -> Dict[str, Any]:
- """Convert to dictionary for JSON serialization."""
- return {
- 'query': self.query,
- 'total_results': self.total_results,
- 'results_by_type': {k.value: [r.to_dict() for r in v] for k, v in self.results_by_type.items()},
- 'combined_results': [r.to_dict() for r in self.combined_results],
- 'search_time_ms': self.search_time_ms,
- }
+ Pattern = Annotated[
+ str,
+ Field(
+ description="The search pattern (supports regex for grep, natural language for vector search)",
+ min_length=1,
+ ),
+ ]
+
+ SearchPath = Annotated[
+ str,
+ Field(
+ description="The directory to search in. Defaults to current directory.",
+ default=".",
+ ),
+ ]

+ Include = Annotated[
+ str,
+ Field(
+ description='File pattern to include (e.g. "*.js", "*.{ts,tsx}")',
+ default="*",
+ ),
+ ]

- Pattern = Annotated[str, Field(description="The search pattern/query", min_length=1)]
- SearchPath = Annotated[str, Field(description="Path to search in", default=".")]
- Include = Annotated[str, Field(description="File pattern to include", default="*")]
- MaxResults = Annotated[int, Field(description="Maximum results per search type", default=20)]
- EnableVector = Annotated[bool, Field(description="Enable vector/semantic search", default=True)]
- EnableAST = Annotated[bool, Field(description="Enable AST context search", default=True)]
- EnableSymbol = Annotated[bool, Field(description="Enable symbol search", default=True)]
- IncludeContext = Annotated[bool, Field(description="Include function/method context", default=True)]
+ MaxResults = Annotated[
+ int,
+ Field(
+ description="Maximum number of results to return",
+ default=50,
+ ),
+ ]
+
+ EnableGrep = Annotated[
+ bool,
+ Field(
+ description="Enable fast pattern/regex search",
+ default=True,
+ ),
+ ]
+
+ EnableGrepAst = Annotated[
+ bool,
+ Field(
+ description="Enable AST-aware search with code structure context",
+ default=True,
+ ),
+ ]
+
+ EnableVector = Annotated[
+ bool,
+ Field(
+ description="Enable semantic similarity search",
+ default=True,
+ ),
+ ]
+
+ EnableGit = Annotated[
+ bool,
+ Field(
+ description="Enable git history search",
+ default=True,
+ ),
+ ]
+
+ EnableSymbol = Annotated[
+ bool,
+ Field(
+ description="Enable symbol search (functions, classes)",
+ default=True,
+ ),
+ ]
+
+ IncludeContext = Annotated[
+ bool,
+ Field(
+ description="Include function/class context for matches",
+ default=True,
+ ),
+ ]


  class UnifiedSearchParams(TypedDict):
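The 0.6.1 parameter declarations pair typing.Annotated aliases with pydantic Field metadata, then collect them into a TypedDict. A small self-contained sketch of the same pattern (assumes pydantic v2 and Python 3.11+ for typing.Unpack; the names here are illustrative, not the package's):

    from typing import Annotated, TypedDict, Unpack

    from pydantic import Field

    # Each alias bundles the runtime type with schema metadata.
    Pattern = Annotated[str, Field(description="The search pattern", min_length=1)]
    MaxResults = Annotated[int, Field(description="Maximum results", default=50)]

    class SearchParams(TypedDict, total=False):
        pattern: Pattern
        max_results: MaxResults

    def search(**params: Unpack[SearchParams]) -> str:
        # Defaults are applied by hand when reading the TypedDict.
        return f"{params['pattern']!r} (limit {params.get('max_results', 50)})"

    print(search(pattern="TODO"))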
@@ -94,35 +138,38 @@ class UnifiedSearchParams(TypedDict):
  path: SearchPath
  include: Include
  max_results: MaxResults
+ enable_grep: EnableGrep
+ enable_grep_ast: EnableGrepAst
  enable_vector: EnableVector
- enable_ast: EnableAST
+ enable_git: EnableGit
  enable_symbol: EnableSymbol
  include_context: IncludeContext


  @final
  class UnifiedSearchTool(FilesystemBaseTool):
- """Unified search tool combining multiple search strategies."""
+ """Unified search tool that runs multiple search types in parallel."""

  def __init__(self, permission_manager: PermissionManager,
  project_manager: Optional[ProjectVectorManager] = None):
- """Initialize the unified search tool."""
+ """Initialize the unified search tool.
+
+ Args:
+ permission_manager: Permission manager for access control
+ project_manager: Optional project manager for vector search
+ """
  super().__init__(permission_manager)
  self.project_manager = project_manager

- # Initialize component search tools
+ # Initialize component tools
  self.grep_tool = Grep(permission_manager)
- self.grep_ast_tool = GrepAstTool(permission_manager)
- self.ast_analyzer = ASTAnalyzer()
+ self.grep_ast_tool = SymbolsTool(permission_manager)
+ self.git_search_tool = GitSearchTool(permission_manager)

  # Vector search is optional
  self.vector_tool = None
  if project_manager:
  self.vector_tool = VectorSearchTool(permission_manager, project_manager)
-
- # Cache for AST analysis results
- self._ast_cache: Dict[str, Any] = {}
- self._symbol_cache: Dict[str, List[Symbol]] = {}

  @property
  @override
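The rewritten constructor degrades gracefully: vector search is wired up only when a project manager is supplied, and stays disabled otherwise. A toy sketch of that optional-component pattern, with hypothetical class names:

    from typing import Optional

    class VectorBackend:
        def query(self, text: str) -> list[str]:
            return [f"vector hit for {text}"]

    class SearchService:
        def __init__(self, vector_backend: Optional[VectorBackend] = None):
            # Optional component: None simply disables that search type.
            self.vector_backend = vector_backend

        def search(self, text: str) -> list[str]:
            hits = [f"grep hit for {text}"]
            if self.vector_backend is not None:
                hits.extend(self.vector_backend.query(text))
            return hits

    print(SearchService().search("TODO"))                 # grep only
    print(SearchService(VectorBackend()).search("TODO"))  # grep + vector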
@@ -130,114 +177,158 @@ class UnifiedSearchTool(FilesystemBaseTool):
  """Get the tool name."""
  return "unified_search"

- @property
+ @property
  @override
  def description(self) -> str:
  """Get the tool description."""
- return """Intelligent unified search combining grep, vector similarity, AST context, and symbol search.
+ return """Unified search that runs multiple search strategies in parallel.

- This tool provides the most comprehensive search experience by:
- 1. Starting with fast grep/regex search for immediate results
- 2. Enhancing with vector similarity for semantic matches
- 3. Adding AST context to show structural information
- 4. Including symbol search for code definitions
- 5. Providing function/method body context when relevant
+ Automatically runs the most appropriate search types based on your pattern:
+ - Pattern matching (grep) for exact text/regex
+ - AST search for code structure understanding
+ - Semantic search for concepts and meaning
+ - Git history for tracking changes
+ - Symbol search for finding definitions

- The tool intelligently combines results and provides relevance scoring across all search types.
- Use this when you need comprehensive search results or aren't sure which search type is best."""
+ All searches run concurrently for maximum speed. Results are combined,
+ deduplicated, and ranked by relevance.

- def _detect_search_intent(self, pattern: str) -> Tuple[bool, bool, bool]:
- """Analyze pattern to determine which search types to enable.
+ Examples:
+ - Search for TODO comments: pattern="TODO"
+ - Find error handling: pattern="error handling implementation"
+ - Locate function: pattern="processPayment"
+ - Track changes: pattern="bug fix" (searches git history too)
+
+ This is the recommended search tool for comprehensive results."""
+
+ def _analyze_pattern(self, pattern: str) -> Dict[str, bool]:
+ """Analyze the pattern to determine optimal search strategies.

+ Args:
+ pattern: The search pattern
+
  Returns:
- Tuple of (should_use_vector, should_use_ast, should_use_symbol)
+ Dictionary of search type recommendations
  """
- # Default to all enabled
- use_vector = True
- use_ast = True
- use_symbol = True
-
- # If pattern looks like regex, focus on text search
- regex_indicators = ['.*', '\\w', '\\d', '\\s', '[', ']', '(', ')', '|', '^', '$']
- if any(indicator in pattern for indicator in regex_indicators):
- use_vector = False # Regex patterns don't work well with vector search
-
- # If pattern looks like a function/class name, prioritize symbol search
- if re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', pattern):
- use_symbol = True
- use_ast = True
-
- # If pattern contains natural language, prioritize vector search
+ # Check if pattern looks like regex
+ regex_chars = r'[.*+?^${}()|[\]\\]'
+ has_regex = bool(re.search(regex_chars, pattern))
+
+ # Check if pattern looks like a symbol name
+ is_symbol = bool(re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', pattern))
+
+ # Check if pattern is natural language
  words = pattern.split()
- if len(words) > 2 and not any(c in pattern for c in ['(', ')', '{', '}', '[', ']']):
- use_vector = True
-
- return use_vector, use_ast, use_symbol
+ is_natural_language = len(words) > 2 and not has_regex
+
+ return {
+ 'use_grep': True, # Always useful
+ 'use_grep_ast': not has_regex, # AST doesn't handle regex well
+ 'use_vector': is_natural_language or len(pattern) > 10,
+ 'use_git': True, # Always check history
+ 'use_symbol': is_symbol or 'def' in pattern or 'class' in pattern
+ }

- async def _run_grep_search(self, pattern: str, path: str, include: str,
+ async def _run_grep_search(self, pattern: str, path: str, include: str,
  tool_ctx, max_results: int) -> List[SearchResult]:
- """Run grep search and convert results."""
- await tool_ctx.info(f"Running grep search for: {pattern}")
-
+ """Run grep search and parse results."""
  try:
- # Use the existing grep tool
- grep_result = await self.grep_tool.call(
+ result = await self.grep_tool.call(
  tool_ctx.mcp_context,
  pattern=pattern,
- path=path,
+ path=path,
  include=include
  )

  results = []
- if "Found" in grep_result and "matches" in grep_result:
- # Parse grep results
- lines = grep_result.split('\n')
- for line in lines[2:]: # Skip header lines
- if ':' in line and len(line.strip()) > 0:
+ if "Found" in result and "matches" in result:
+ lines = result.split('\n')
+ for line in lines[2:]: # Skip header
+ if ':' in line and line.strip():
  try:
  parts = line.split(':', 2)
  if len(parts) >= 3:
- file_path = parts[0]
- line_num = int(parts[1])
- content = parts[2].strip()
-
- result = SearchResult(
- file_path=file_path,
- line_number=line_num,
- content=content,
+ results.append(SearchResult(
+ file_path=parts[0],
+ line_number=int(parts[1]),
+ content=parts[2].strip(),
  search_type=SearchType.GREP,
- score=1.0, # Grep results are exact matches
- )
- results.append(result)
-
+ score=1.0 # Exact matches get perfect score
+ ))
  if len(results) >= max_results:
  break
- except (ValueError, IndexError):
+ except ValueError:
  continue

- await tool_ctx.info(f"Grep search found {len(results)} results")
+ await tool_ctx.info(f"Grep found {len(results)} results")
  return results
-
+
+ except Exception as e:
+ await tool_ctx.error(f"Grep search failed: {e}")
+ return []
+
+ async def _run_grep_ast_search(self, pattern: str, path: str,
+ tool_ctx, max_results: int) -> List[SearchResult]:
+ """Run AST-aware search and parse results."""
+ try:
+ result = await self.grep_ast_tool.call(
+ tool_ctx.mcp_context,
+ pattern=pattern,
+ path=path,
+ ignore_case=True,
+ line_number=True
+ )
+
+ results = []
+ if result and not result.startswith("No matches"):
+ current_file = None
+ current_context = []
+
+ for line in result.split('\n'):
+ if line.endswith(':') and '/' in line:
+ current_file = line[:-1]
+ current_context = []
+ elif current_file and ':' in line:
+ try:
+ # Try to parse line with number
+ parts = line.split(':', 1)
+ line_num = int(parts[0].strip())
+ content = parts[1].strip() if len(parts) > 1 else ""
+
+ results.append(SearchResult(
+ file_path=current_file,
+ line_number=line_num,
+ content=content,
+ search_type=SearchType.GREP_AST,
+ score=0.95, # High score for AST matches
+ context=" > ".join(current_context) if current_context else None
+ ))
+
+ if len(results) >= max_results:
+ break
+ except ValueError:
+ # This might be context info
+ if line.strip():
+ current_context.append(line.strip())
+
+ await tool_ctx.info(f"AST search found {len(results)} results")
+ return results
+
  except Exception as e:
- await tool_ctx.error(f"Grep search failed: {str(e)}")
+ await tool_ctx.error(f"AST search failed: {e}")
  return []

- async def _run_vector_search(self, pattern: str, path: str, tool_ctx,
- max_results: int) -> List[SearchResult]:
- """Run vector search and convert results."""
+ async def _run_vector_search(self, pattern: str, path: str,
+ tool_ctx, max_results: int) -> List[SearchResult]:
+ """Run semantic vector search."""
  if not self.vector_tool:
  return []

- await tool_ctx.info(f"Running vector search for: {pattern}")
-
  try:
- # Determine search scope based on path
- if path == ".":
- search_scope = "current"
- else:
- search_scope = "all" # Could be enhanced to detect project
+ # Determine search scope
+ search_scope = "current" if path == "." else "all"

- vector_result = await self.vector_tool.call(
+ result = await self.vector_tool.call(
  tool_ctx.mcp_context,
  query=pattern,
  limit=max_results,
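The new _analyze_pattern helper classifies the query with three cheap checks (regex metacharacters, identifier shape, word count) and maps them to per-backend recommendations. The same heuristics, extracted into a standalone function for illustration:

    import re

    def analyze_pattern(pattern: str) -> dict[str, bool]:
        # Regex metacharacters suggest a grep-style query.
        has_regex = bool(re.search(r'[.*+?^${}()|[\]\\]', pattern))
        # A bare identifier suggests a symbol lookup.
        is_symbol = bool(re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', pattern))
        # Several plain words suggest a natural-language query.
        is_natural_language = len(pattern.split()) > 2 and not has_regex
        return {
            'use_grep': True,
            'use_grep_ast': not has_regex,
            'use_vector': is_natural_language or len(pattern) > 10,
            'use_git': True,
            'use_symbol': is_symbol or 'def' in pattern or 'class' in pattern,
        }

    print(analyze_pattern("processPayment"))                 # symbol-shaped
    print(analyze_pattern("error handling implementation"))  # natural language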
@@ -247,37 +338,32 @@ Use this when you need comprehensive search results or aren't sure which search
  )

  results = []
- # Parse vector search results - this would need to be enhanced
- # based on the actual format returned by vector_tool
- if "Found" in vector_result:
- # This is a simplified parser - would need to match actual format
- lines = vector_result.split('\n')
+ if "Found" in result:
+ # Parse vector search results
+ lines = result.split('\n')
  current_file = None
  current_score = 0.0

  for line in lines:
  if "Result" in line and "Score:" in line:
- # Extract score
+ # Extract score and file
  score_match = re.search(r'Score: ([\d.]+)%', line)
  if score_match:
  current_score = float(score_match.group(1)) / 100.0

- # Extract file path
- if " - " in line:
- parts = line.split(" - ")
- if len(parts) > 1:
- current_file = parts[-1].strip()
+ file_match = re.search(r' - ([^\s]+)$', line)
+ if file_match:
+ current_file = file_match.group(1)

  elif current_file and line.strip() and not line.startswith('-'):
- # This is content
- result = SearchResult(
+ # Content line
+ results.append(SearchResult(
  file_path=current_file,
  line_number=None,
- content=line.strip(),
+ content=line.strip()[:200], # Limit content length
  search_type=SearchType.VECTOR,
- score=current_score,
- )
- results.append(result)
+ score=current_score
+ ))

  if len(results) >= max_results:
  break
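The vector-result parser above walks a text report line by line, pulling a percentage score and a trailing file path out of header lines with regular expressions. A sketch against an assumed report shape (the real format is whatever VectorSearchTool emits):

    import re

    # Assumed report shape; purely illustrative.
    report = """Found 2 results
    Result 1 (Score: 92.5%) - src/payments.py
    handles retries around the payment gateway
    Result 2 (Score: 71.0%) - src/errors.py
    central error handling helpers"""

    current_file, score, matches = None, 0.0, []
    for line in report.split('\n'):
        line = line.strip()
        if "Result" in line and "Score:" in line:
            score_match = re.search(r'Score: ([\d.]+)%', line)
            file_match = re.search(r' - ([^\s]+)$', line)
            score = float(score_match.group(1)) / 100.0 if score_match else 0.0
            current_file = file_match.group(1) if file_match else None
        elif current_file and line and not line.startswith('-'):
            matches.append((current_file, score, line))

    print(matches)  # [('src/payments.py', 0.925, '...'), ('src/errors.py', 0.71, '...')]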
@@ -286,227 +372,162 @@ Use this when you need comprehensive search results or aren't sure which search
  return results

  except Exception as e:
- await tool_ctx.error(f"Vector search failed: {str(e)}")
+ await tool_ctx.error(f"Vector search failed: {e}")
  return []

- async def _run_ast_search(self, pattern: str, path: str, include: str,
+ async def _run_git_search(self, pattern: str, path: str,
  tool_ctx, max_results: int) -> List[SearchResult]:
- """Run AST-aware search and convert results."""
- await tool_ctx.info(f"Running AST search for: {pattern}")
-
+ """Run git history search."""
  try:
- ast_result = await self.grep_ast_tool.call(
- tool_ctx.mcp_context,
- pattern=pattern,
- path=path,
- ignore_case=False,
- line_number=True
- )
+ # Search in both content and commits
+ tasks = [
+ self.git_search_tool.call(
+ tool_ctx.mcp_context,
+ pattern=pattern,
+ path=path,
+ search_type="content",
+ max_count=max_results // 2
+ ),
+ self.git_search_tool.call(
+ tool_ctx.mcp_context,
+ pattern=pattern,
+ path=path,
+ search_type="commits",
+ max_count=max_results // 2
+ )
+ ]
+
+ git_results = await asyncio.gather(*tasks, return_exceptions=True)

  results = []
- if ast_result and not ast_result.startswith("No matches"):
- # Parse AST results - they include structural context
- current_file = None
- context_lines = []
-
- for line in ast_result.split('\n'):
- if line.endswith(':') and '/' in line:
- # This is a file header
- current_file = line[:-1]
- context_lines = []
- elif current_file and line.strip():
- if ':' in line and line.strip()[0].isdigit():
- # This looks like a line with number
- try:
- parts = line.split(':', 1)
- line_num = int(parts[0].strip())
- content = parts[1].strip() if len(parts) > 1 else ""
-
- result = SearchResult(
- file_path=current_file,
- line_number=line_num,
- content=content,
- search_type=SearchType.AST,
- score=0.9, # High score for AST matches
- context='\n'.join(context_lines) if context_lines else None
- )
- results.append(result)
+ for i, result in enumerate(git_results):
+ if isinstance(result, Exception):
+ continue
+
+ if "Found" in result:
+ # Parse git results
+ lines = result.split('\n')
+ for line in lines:
+ if ':' in line and line.strip():
+ parts = line.split(':', 2)
+ if len(parts) >= 2:
+ results.append(SearchResult(
+ file_path=parts[0].strip(),
+ line_number=None,
+ content=parts[-1].strip() if len(parts) > 2 else line,
+ search_type=SearchType.GIT,
+ score=0.8 # Good score for git matches
+ ))

  if len(results) >= max_results:
  break
-
- except ValueError:
- context_lines.append(line)
- else:
- context_lines.append(line)

- await tool_ctx.info(f"AST search found {len(results)} results")
+ await tool_ctx.info(f"Git search found {len(results)} results")
  return results

  except Exception as e:
- await tool_ctx.error(f"AST search failed: {str(e)}")
+ await tool_ctx.error(f"Git search failed: {e}")
  return []

- async def _run_symbol_search(self, pattern: str, path: str, tool_ctx,
- max_results: int) -> List[SearchResult]:
- """Run symbol search using AST analysis."""
- await tool_ctx.info(f"Running symbol search for: {pattern}")
-
+ async def _run_symbol_search(self, pattern: str, path: str,
+ tool_ctx, max_results: int) -> List[SearchResult]:
+ """Search for symbol definitions using grep with specific patterns."""
  try:
- results = []
- path_obj = Path(path)
+ # Create patterns for common symbol definitions
+ symbol_patterns = [
+ f"(def|class|function|func|fn)\\s+{pattern}", # Python, JS, various
+ f"(public|private|protected)?\\s*(static)?\\s*\\w+\\s+{pattern}\\s*\\(", # Java/C++
+ f"const\\s+{pattern}\\s*=", # JS/TS const
+ f"let\\s+{pattern}\\s*=", # JS/TS let
+ f"var\\s+{pattern}\\s*=", # JS/TS var
+ ]
+
+ # Run grep searches in parallel for each pattern
+ tasks = []
+ for sp in symbol_patterns:
+ tasks.append(
+ self.grep_tool.call(
+ tool_ctx.mcp_context,
+ pattern=sp,
+ path=path,
+ include="*"
+ )
+ )

- # Find files to analyze
- files_to_check = []
- if path_obj.is_file():
- files_to_check.append(str(path_obj))
- elif path_obj.is_dir():
- # Look for source files
- for ext in ['.py', '.js', '.ts', '.java', '.cpp', '.c']:
- files_to_check.extend(path_obj.rglob(f'*{ext}'))
- files_to_check = [str(f) for f in files_to_check[:50]] # Limit for performance
+ grep_results = await asyncio.gather(*tasks, return_exceptions=True)

- # Analyze files for symbols
- for file_path in files_to_check:
- if not self.is_path_allowed(file_path):
+ results = []
+ for result in grep_results:
+ if isinstance(result, Exception):
  continue

- # Check cache first
- if file_path in self._symbol_cache:
- symbols = self._symbol_cache[file_path]
- else:
- # Analyze file
- file_ast = self.ast_analyzer.analyze_file(file_path)
- symbols = file_ast.symbols if file_ast else []
- self._symbol_cache[file_path] = symbols
-
- # Search symbols
- for symbol in symbols:
- if re.search(pattern, symbol.name, re.IGNORECASE):
- result = SearchResult(
- file_path=symbol.file_path,
- line_number=symbol.line_start,
- content=f"{symbol.type} {symbol.name}" + (f" - {symbol.docstring[:100]}..." if symbol.docstring else ""),
- search_type=SearchType.SYMBOL,
- score=0.95, # Very high score for symbol matches
- symbol_info=symbol,
- context=symbol.signature
- )
- results.append(result)
-
- if len(results) >= max_results:
- break
-
- if len(results) >= max_results:
- break
+ if "Found" in result and "matches" in result:
+ lines = result.split('\n')
+ for line in lines[2:]: # Skip header
+ if ':' in line and line.strip():
+ try:
+ parts = line.split(':', 2)
+ if len(parts) >= 3:
+ results.append(SearchResult(
+ file_path=parts[0],
+ line_number=int(parts[1]),
+ content=parts[2].strip(),
+ search_type=SearchType.SYMBOL,
+ score=0.98 # Very high score for symbol definitions
+ ))
+ if len(results) >= max_results:
+ break
+ except ValueError:
+ continue

  await tool_ctx.info(f"Symbol search found {len(results)} results")
  return results

  except Exception as e:
- await tool_ctx.error(f"Symbol search failed: {str(e)}")
+ await tool_ctx.error(f"Symbol search failed: {e}")
  return []

- async def _add_function_context(self, results: List[SearchResult], tool_ctx) -> List[SearchResult]:
- """Add function/method context to results where relevant."""
- enhanced_results = []
+ def _deduplicate_results(self, all_results: List[SearchResult]) -> List[SearchResult]:
+ """Deduplicate results, keeping the highest scoring version."""
+ seen = {}

- for result in results:
- enhanced_result = result
+ for result in all_results:
+ key = (result.file_path, result.line_number)

- if result.line_number and not result.context:
- try:
- # Read the file and find surrounding function
- file_path = Path(result.file_path)
- if file_path.exists() and self.is_path_allowed(str(file_path)):
-
- # Check if we have AST analysis cached
- if str(file_path) not in self._ast_cache:
- file_ast = self.ast_analyzer.analyze_file(str(file_path))
- self._ast_cache[str(file_path)] = file_ast
- else:
- file_ast = self._ast_cache[str(file_path)]
-
- if file_ast:
- # Find symbol containing this line
- for symbol in file_ast.symbols:
- if (symbol.line_start <= result.line_number <= symbol.line_end and
- symbol.type in ['function', 'method']):
- enhanced_result = SearchResult(
- file_path=result.file_path,
- line_number=result.line_number,
- content=result.content,
- search_type=result.search_type,
- score=result.score,
- context=f"In {symbol.type} {symbol.name}(): {symbol.signature or ''}",
- symbol_info=symbol,
- project=result.project
- )
- break
- except Exception as e:
- await tool_ctx.warning(f"Could not add context for {result.file_path}: {str(e)}")
-
- enhanced_results.append(enhanced_result)
+ if key not in seen or result.score > seen[key].score:
+ seen[key] = result
+ elif key in seen and result.context and not seen[key].context:
+ # Add context if missing
+ seen[key].context = result.context

- return enhanced_results
+ return list(seen.values())

- def _combine_and_rank_results(self, results_by_type: Dict[SearchType, List[SearchResult]]) -> List[SearchResult]:
- """Combine results from different search types and rank by relevance."""
- all_results = []
- seen_combinations = set()
-
- # Combine all results, avoiding duplicates
- for search_type, results in results_by_type.items():
- for result in results:
- # Create a key to identify duplicates
- key = (result.file_path, result.line_number)
-
- if key not in seen_combinations:
- seen_combinations.add(key)
- all_results.append(result)
- else:
- # Merge with existing result based on score and type priority
- type_priority = {
- SearchType.SYMBOL: 4,
- SearchType.GREP: 3,
- SearchType.AST: 2,
- SearchType.VECTOR: 1
- }
-
- for existing in all_results:
- existing_key = (existing.file_path, existing.line_number)
- if existing_key == key:
- # Update if the new result has higher priority or better score
- result_priority = type_priority[result.search_type]
- existing_priority = type_priority[existing.search_type]
-
- # Replace existing if: higher priority type, or same priority but higher score
- if (result_priority > existing_priority or
- (result_priority == existing_priority and result.score > existing.score)):
- # Replace the entire result to preserve type
- idx = all_results.index(existing)
- all_results[idx] = result
- else:
- # Still merge useful information
- existing.context = existing.context or result.context
- existing.symbol_info = existing.symbol_info or result.symbol_info
- break
-
- # Sort by score (descending) then by search type priority
+ def _rank_results(self, results: List[SearchResult]) -> List[SearchResult]:
+ """Rank results by relevance score and search type priority."""
+ # Define search type priorities
  type_priority = {
- SearchType.SYMBOL: 4,
- SearchType.GREP: 3,
- SearchType.AST: 2,
+ SearchType.SYMBOL: 5,
+ SearchType.GREP: 4,
+ SearchType.GREP_AST: 3,
+ SearchType.GIT: 2,
  SearchType.VECTOR: 1
  }

- all_results.sort(key=lambda r: (r.score, type_priority[r.search_type]), reverse=True)
+ # Sort by score (descending) and then by type priority
+ results.sort(
+ key=lambda r: (r.score, type_priority.get(r.search_type, 0)),
+ reverse=True
+ )

- return all_results
+ return results

  @override
- async def call(self, ctx: MCPContext, **params: Unpack[UnifiedSearchParams]) -> str:
- """Execute unified search with all enabled search types."""
+ async def call(
+ self,
+ ctx: MCPContext,
+ **params: Unpack[UnifiedSearchParams],
+ ) -> str:
+ """Execute unified search across all enabled search types."""
  import time
  start_time = time.time()

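The new _deduplicate_results and _rank_results helpers replace the old merge loop with a dictionary keyed by (file, line) that keeps the best-scoring hit, followed by a single sort on (score, type priority). A condensed sketch of that two-step pipeline, with a simplified Hit type standing in for SearchResult:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Hit:
        file_path: str
        line_number: Optional[int]
        score: float
        priority: int  # stand-in for the SearchType priority table

    def dedupe_and_rank(hits: list[Hit]) -> list[Hit]:
        # Keep only the highest-scoring hit per (file, line) location.
        best: dict[tuple[str, Optional[int]], Hit] = {}
        for hit in hits:
            key = (hit.file_path, hit.line_number)
            if key not in best or hit.score > best[key].score:
                best[key] = hit
        # Sort by score, breaking ties with the search-type priority.
        return sorted(best.values(), key=lambda h: (h.score, h.priority), reverse=True)

    hits = [
        Hit("a.py", 3, 1.0, 4),   # grep
        Hit("a.py", 3, 0.95, 3),  # duplicate location, lower score: dropped
        Hit("b.py", None, 0.8, 2),
    ]
    print(dedupe_and_rank(hits))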
@@ -516,10 +537,7 @@ Use this when you need comprehensive search results or aren't sure which search
  pattern = params["pattern"]
  path = params.get("path", ".")
  include = params.get("include", "*")
- max_results = params.get("max_results", 20)
- enable_vector = params.get("enable_vector", True)
- enable_ast = params.get("enable_ast", True)
- enable_symbol = params.get("enable_symbol", True)
+ max_results = params.get("max_results", 50)
  include_context = params.get("include_context", True)

  # Validate path
@@ -528,136 +546,136 @@ Use this when you need comprehensive search results or aren't sure which search
  await tool_ctx.error(path_validation.error_message)
  return f"Error: {path_validation.error_message}"

- # Check path permissions and existence
+ # Check permissions
  allowed, error_msg = await self.check_path_allowed(path, tool_ctx)
  if not allowed:
  return error_msg
-
+
+ # Check existence
  exists, error_msg = await self.check_path_exists(path, tool_ctx)
  if not exists:
  return error_msg

- # Analyze search intent to optimize which searches to run
- should_vector, should_ast, should_symbol = self._detect_search_intent(pattern)
- enable_vector = enable_vector and should_vector
- enable_ast = enable_ast and should_ast
- enable_symbol = enable_symbol and should_symbol
+ # Analyze pattern to determine best search strategies
+ pattern_analysis = self._analyze_pattern(pattern)

  await tool_ctx.info(f"Starting unified search for '{pattern}' in {path}")
- await tool_ctx.info(f"Enabled searches: grep=True vector={enable_vector} ast={enable_ast} symbol={enable_symbol}")

- # Run searches in parallel for maximum efficiency
+ # Build list of search tasks based on enabled types and pattern analysis
  search_tasks = []
+ search_names = []

- # Always run grep first (fastest, most reliable)
- search_tasks.append(
- self._run_grep_search(pattern, path, include, tool_ctx, max_results)
- )
+ if params.get("enable_grep", True) and pattern_analysis['use_grep']:
+ search_tasks.append(self._run_grep_search(pattern, path, include, tool_ctx, max_results))
+ search_names.append("grep")

- if enable_vector and self.vector_tool:
- search_tasks.append(
- self._run_vector_search(pattern, path, tool_ctx, max_results)
- )
+ if params.get("enable_grep_ast", True) and pattern_analysis['use_grep_ast']:
+ search_tasks.append(self._run_grep_ast_search(pattern, path, tool_ctx, max_results))
+ search_names.append("grep_ast")

- if enable_ast:
- search_tasks.append(
- self._run_ast_search(pattern, path, include, tool_ctx, max_results)
- )
+ if params.get("enable_vector", True) and self.vector_tool and pattern_analysis['use_vector']:
+ search_tasks.append(self._run_vector_search(pattern, path, tool_ctx, max_results))
+ search_names.append("vector")

- if enable_symbol:
- search_tasks.append(
- self._run_symbol_search(pattern, path, tool_ctx, max_results)
- )
+ if params.get("enable_git", True) and pattern_analysis['use_git']:
+ search_tasks.append(self._run_git_search(pattern, path, tool_ctx, max_results))
+ search_names.append("git")
+
+ if params.get("enable_symbol", True) and pattern_analysis['use_symbol']:
+ search_tasks.append(self._run_symbol_search(pattern, path, tool_ctx, max_results))
+ search_names.append("symbol")

- # Execute all searches in parallel
+ await tool_ctx.info(f"Running {len(search_tasks)} search types in parallel: {', '.join(search_names)}")
+
+ # Run all searches in parallel
  search_results = await asyncio.gather(*search_tasks, return_exceptions=True)

- # Organize results by type
+ # Collect all results
+ all_results = []
  results_by_type = {}
- search_types = [SearchType.GREP]
- if enable_vector and self.vector_tool:
- search_types.append(SearchType.VECTOR)
- if enable_ast:
- search_types.append(SearchType.AST)
- if enable_symbol:
- search_types.append(SearchType.SYMBOL)
-
- for i, result in enumerate(search_results):
- if isinstance(result, Exception):
- await tool_ctx.error(f"Search failed: {str(result)}")
- continue
-
- search_type = search_types[i]
- results_by_type[search_type] = result

- # Add function context if requested
- if include_context:
- for search_type, results in results_by_type.items():
- if results:
- results_by_type[search_type] = await self._add_function_context(results, tool_ctx)
+ for search_type, results in zip(search_names, search_results):
+ if isinstance(results, Exception):
+ await tool_ctx.error(f"{search_type} search failed: {results}")
+ results_by_type[search_type] = []
+ else:
+ results_by_type[search_type] = results
+ all_results.extend(results)

- # Combine and rank all results
- combined_results = self._combine_and_rank_results(results_by_type)
+ # Deduplicate and rank results
+ unique_results = self._deduplicate_results(all_results)
+ ranked_results = self._rank_results(unique_results)

- end_time = time.time()
- search_time_ms = (end_time - start_time) * 1000
+ # Limit total results
+ final_results = ranked_results[:max_results]

- # Create unified results object
- unified_results = UnifiedSearchResults(
- query=pattern,
- total_results=len(combined_results),
- results_by_type=results_by_type,
- combined_results=combined_results[:max_results * 2], # Allow some extra for variety
- search_time_ms=search_time_ms
- )
+ # Calculate search time
+ search_time = (time.time() - start_time) * 1000

  # Format output
- return self._format_unified_results(unified_results)
+ return self._format_results(
+ pattern=pattern,
+ results=final_results,
+ results_by_type=results_by_type,
+ search_time_ms=search_time,
+ include_context=include_context
+ )

- def _format_unified_results(self, results: UnifiedSearchResults) -> str:
- """Format unified search results for display."""
- if results.total_results == 0:
- return f"No results found for query: '{results.query}'"
-
- lines = [
- f"Unified Search Results for '{results.query}' ({results.search_time_ms:.1f}ms)",
- f"Found {results.total_results} total results across {len(results.results_by_type)} search types",
- ""
- ]
-
- # Show summary by type
- for search_type, type_results in results.results_by_type.items():
+ def _format_results(self, pattern: str, results: List[SearchResult],
+ results_by_type: Dict[str, List[SearchResult]],
+ search_time_ms: float, include_context: bool) -> str:
+ """Format search results for display."""
+ output = []
+
+ # Header
+ output.append(f"=== Unified Search Results ===")
+ output.append(f"Pattern: '{pattern}'")
+ output.append(f"Total results: {len(results)}")
+ output.append(f"Search time: {search_time_ms:.1f}ms")
+
+ # Summary by type
+ output.append("\nResults by type:")
+ for search_type, type_results in results_by_type.items():
  if type_results:
- lines.append(f"{search_type.value.title()}: {len(type_results)} results")
- lines.append("")
+ output.append(f" {search_type}: {len(type_results)} matches")

- # Show top combined results
- lines.append("=== Top Results (Combined & Ranked) ===")
- for i, result in enumerate(results.combined_results[:20], 1):
- score_display = f"{result.score:.2f}" if result.score < 1.0 else "1.00"
-
- header = f"Result {i} [{result.search_type.value}] (Score: {score_display})"
- if result.line_number:
- header += f" - {result.file_path}:{result.line_number}"
- else:
- header += f" - {result.file_path}"
-
- lines.append(header)
- lines.append("-" * len(header))
-
- if result.context:
- lines.append(f"Context: {result.context}")
+ if not results:
+ output.append("\nNo results found.")
+ return "\n".join(output)
+
+ # Group results by file
+ results_by_file = {}
+ for result in results:
+ if result.file_path not in results_by_file:
+ results_by_file[result.file_path] = []
+ results_by_file[result.file_path].append(result)
+
+ # Display results
+ output.append(f"\n=== Results ({len(results)} total) ===\n")
+
+ for file_path, file_results in results_by_file.items():
+ output.append(f"{file_path}")
+ output.append("-" * len(file_path))

- lines.append(f"Content: {result.content}")
+ # Sort by line number
+ file_results.sort(key=lambda r: r.line_number or 0)

- if result.symbol_info:
- lines.append(f"Symbol: {result.symbol_info.type} {result.symbol_info.name}")
- if result.symbol_info.signature:
- lines.append(f"Signature: {result.symbol_info.signature}")
+ for result in file_results:
+ # Format result line
+ score_str = f"[{result.search_type.value} {result.score:.2f}]"
+
+ if result.line_number:
+ output.append(f" {result.line_number:>4}: {score_str} {result.content}")
+ else:
+ output.append(f" {score_str} {result.content}")
+
+ # Add context if available and requested
+ if include_context and result.context:
+ output.append(f" Context: {result.context}")

- lines.append("")
+ output.append("") # Empty line between files

- return "\n".join(lines)
+ return "\n".join(output)

  @override
  def register(self, mcp_server: FastMCP) -> None:
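In the rewritten call method, the task list and a parallel list of names are built together so that zip(search_names, search_results) can attribute each failure to its backend after asyncio.gather(..., return_exceptions=True). A minimal sketch of that bookkeeping, with hypothetical backends:

    import asyncio

    async def ok(name: str) -> list[str]:
        return [f"{name} hit"]

    async def boom(name: str) -> list[str]:
        raise RuntimeError(f"{name} backend unavailable")

    async def main() -> None:
        tasks, names = [], []
        for name, fn in [("grep", ok), ("vector", boom), ("git", ok)]:
            tasks.append(fn(name))
            names.append(name)

        results = await asyncio.gather(*tasks, return_exceptions=True)

        collected: dict[str, list[str]] = {}
        for name, result in zip(names, results):
            # A failed backend yields an Exception object, not a crash.
            collected[name] = [] if isinstance(result, Exception) else result
        print(collected)

    asyncio.run(main())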
@@ -670,9 +688,11 @@ Use this when you need comprehensive search results or aren't sure which search
  pattern: Pattern,
  path: SearchPath = ".",
  include: Include = "*",
- max_results: MaxResults = 20,
+ max_results: MaxResults = 50,
+ enable_grep: EnableGrep = True,
+ enable_grep_ast: EnableGrepAst = True,
  enable_vector: EnableVector = True,
- enable_ast: EnableAST = True,
+ enable_git: EnableGit = True,
  enable_symbol: EnableSymbol = True,
  include_context: IncludeContext = True,
  ) -> str:
@@ -682,8 +702,10 @@ Use this when you need comprehensive search results or aren't sure which search
  path=path,
  include=include,
  max_results=max_results,
+ enable_grep=enable_grep,
+ enable_grep_ast=enable_grep_ast,
  enable_vector=enable_vector,
- enable_ast=enable_ast,
+ enable_git=enable_git,
  enable_symbol=enable_symbol,
  include_context=include_context,
  )
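Taken together, the final two hunks mean the registered unified_search tool now exposes per-backend toggles. A hypothetical argument payload matching the signature above (the values are illustrative):

    # Hypothetical invocation payload matching the registered signature.
    arguments = {
        "pattern": "error handling implementation",
        "path": ".",
        "include": "*.py",
        "max_results": 50,
        "enable_grep": True,
        "enable_grep_ast": True,
        "enable_vector": True,
        "enable_git": False,   # skip git history for speed
        "enable_symbol": True,
        "include_context": True,
    }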