hanzo-mcp 0.8.11__py3-none-any.whl → 0.8.14__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of hanzo-mcp might be problematic; details are available on the registry page.

Files changed (154)
  1. hanzo_mcp/__init__.py +2 -4
  2. hanzo_mcp/analytics/posthog_analytics.py +3 -9
  3. hanzo_mcp/bridge.py +9 -25
  4. hanzo_mcp/cli.py +6 -15
  5. hanzo_mcp/cli_enhanced.py +5 -14
  6. hanzo_mcp/cli_plugin.py +3 -9
  7. hanzo_mcp/config/settings.py +6 -20
  8. hanzo_mcp/config/tool_config.py +1 -3
  9. hanzo_mcp/core/base_agent.py +88 -88
  10. hanzo_mcp/core/model_registry.py +238 -210
  11. hanzo_mcp/dev_server.py +5 -15
  12. hanzo_mcp/prompts/__init__.py +2 -6
  13. hanzo_mcp/prompts/project_todo_reminder.py +3 -9
  14. hanzo_mcp/prompts/tool_explorer.py +1 -3
  15. hanzo_mcp/prompts/utils.py +7 -21
  16. hanzo_mcp/server.py +13 -6
  17. hanzo_mcp/tools/__init__.py +10 -24
  18. hanzo_mcp/tools/agent/__init__.py +2 -1
  19. hanzo_mcp/tools/agent/agent.py +10 -30
  20. hanzo_mcp/tools/agent/agent_tool.py +5 -15
  21. hanzo_mcp/tools/agent/agent_tool_v1_deprecated.py +14 -41
  22. hanzo_mcp/tools/agent/claude_desktop_auth.py +3 -9
  23. hanzo_mcp/tools/agent/cli_agent_base.py +7 -24
  24. hanzo_mcp/tools/agent/cli_tools.py +75 -74
  25. hanzo_mcp/tools/agent/code_auth.py +1 -3
  26. hanzo_mcp/tools/agent/code_auth_tool.py +2 -6
  27. hanzo_mcp/tools/agent/critic_tool.py +8 -24
  28. hanzo_mcp/tools/agent/iching_tool.py +12 -36
  29. hanzo_mcp/tools/agent/network_tool.py +7 -18
  30. hanzo_mcp/tools/agent/prompt.py +1 -5
  31. hanzo_mcp/tools/agent/review_tool.py +10 -25
  32. hanzo_mcp/tools/agent/swarm_alias.py +1 -3
  33. hanzo_mcp/tools/agent/swarm_tool.py +9 -29
  34. hanzo_mcp/tools/agent/swarm_tool_v1_deprecated.py +11 -39
  35. hanzo_mcp/tools/agent/unified_cli_tools.py +38 -38
  36. hanzo_mcp/tools/common/batch_tool.py +15 -45
  37. hanzo_mcp/tools/common/config_tool.py +9 -28
  38. hanzo_mcp/tools/common/context.py +1 -3
  39. hanzo_mcp/tools/common/critic_tool.py +1 -3
  40. hanzo_mcp/tools/common/decorators.py +2 -6
  41. hanzo_mcp/tools/common/enhanced_base.py +2 -6
  42. hanzo_mcp/tools/common/fastmcp_pagination.py +4 -12
  43. hanzo_mcp/tools/common/forgiving_edit.py +9 -28
  44. hanzo_mcp/tools/common/mode.py +1 -5
  45. hanzo_mcp/tools/common/paginated_base.py +3 -11
  46. hanzo_mcp/tools/common/paginated_response.py +10 -30
  47. hanzo_mcp/tools/common/pagination.py +3 -9
  48. hanzo_mcp/tools/common/permissions.py +38 -11
  49. hanzo_mcp/tools/common/personality.py +9 -34
  50. hanzo_mcp/tools/common/plugin_loader.py +3 -15
  51. hanzo_mcp/tools/common/stats.py +6 -18
  52. hanzo_mcp/tools/common/thinking_tool.py +1 -3
  53. hanzo_mcp/tools/common/tool_disable.py +2 -6
  54. hanzo_mcp/tools/common/tool_list.py +2 -6
  55. hanzo_mcp/tools/common/validation.py +1 -3
  56. hanzo_mcp/tools/config/config_tool.py +7 -13
  57. hanzo_mcp/tools/config/index_config.py +1 -3
  58. hanzo_mcp/tools/config/mode_tool.py +5 -15
  59. hanzo_mcp/tools/database/database_manager.py +3 -9
  60. hanzo_mcp/tools/database/graph.py +1 -3
  61. hanzo_mcp/tools/database/graph_add.py +3 -9
  62. hanzo_mcp/tools/database/graph_query.py +11 -34
  63. hanzo_mcp/tools/database/graph_remove.py +3 -9
  64. hanzo_mcp/tools/database/graph_search.py +6 -20
  65. hanzo_mcp/tools/database/graph_stats.py +11 -33
  66. hanzo_mcp/tools/database/sql.py +4 -12
  67. hanzo_mcp/tools/database/sql_query.py +6 -10
  68. hanzo_mcp/tools/database/sql_search.py +2 -6
  69. hanzo_mcp/tools/database/sql_stats.py +5 -15
  70. hanzo_mcp/tools/editor/neovim_command.py +1 -3
  71. hanzo_mcp/tools/editor/neovim_edit.py +2 -2
  72. hanzo_mcp/tools/editor/neovim_session.py +7 -13
  73. hanzo_mcp/tools/filesystem/__init__.py +2 -3
  74. hanzo_mcp/tools/filesystem/ast_multi_edit.py +14 -43
  75. hanzo_mcp/tools/filesystem/base.py +4 -12
  76. hanzo_mcp/tools/filesystem/batch_search.py +35 -115
  77. hanzo_mcp/tools/filesystem/content_replace.py +4 -12
  78. hanzo_mcp/tools/filesystem/diff.py +2 -10
  79. hanzo_mcp/tools/filesystem/directory_tree.py +9 -27
  80. hanzo_mcp/tools/filesystem/directory_tree_paginated.py +5 -15
  81. hanzo_mcp/tools/filesystem/edit.py +6 -18
  82. hanzo_mcp/tools/filesystem/find.py +3 -9
  83. hanzo_mcp/tools/filesystem/find_files.py +2 -6
  84. hanzo_mcp/tools/filesystem/git_search.py +9 -24
  85. hanzo_mcp/tools/filesystem/grep.py +9 -27
  86. hanzo_mcp/tools/filesystem/multi_edit.py +6 -18
  87. hanzo_mcp/tools/filesystem/read.py +8 -26
  88. hanzo_mcp/tools/filesystem/rules_tool.py +6 -17
  89. hanzo_mcp/tools/filesystem/search_tool.py +18 -62
  90. hanzo_mcp/tools/filesystem/symbols_tool.py +5 -15
  91. hanzo_mcp/tools/filesystem/tree.py +1 -3
  92. hanzo_mcp/tools/filesystem/watch.py +1 -3
  93. hanzo_mcp/tools/filesystem/write.py +1 -3
  94. hanzo_mcp/tools/jupyter/base.py +6 -20
  95. hanzo_mcp/tools/jupyter/jupyter.py +4 -12
  96. hanzo_mcp/tools/jupyter/notebook_edit.py +11 -35
  97. hanzo_mcp/tools/jupyter/notebook_read.py +2 -6
  98. hanzo_mcp/tools/llm/consensus_tool.py +8 -24
  99. hanzo_mcp/tools/llm/llm_manage.py +2 -6
  100. hanzo_mcp/tools/llm/llm_tool.py +17 -58
  101. hanzo_mcp/tools/llm/llm_unified.py +18 -59
  102. hanzo_mcp/tools/llm/provider_tools.py +1 -3
  103. hanzo_mcp/tools/lsp/lsp_tool.py +5 -17
  104. hanzo_mcp/tools/mcp/mcp_add.py +1 -3
  105. hanzo_mcp/tools/mcp/mcp_stats.py +1 -3
  106. hanzo_mcp/tools/mcp/mcp_tool.py +9 -23
  107. hanzo_mcp/tools/memory/__init__.py +10 -27
  108. hanzo_mcp/tools/memory/knowledge_tools.py +7 -25
  109. hanzo_mcp/tools/memory/memory_tools.py +6 -18
  110. hanzo_mcp/tools/search/find_tool.py +10 -32
  111. hanzo_mcp/tools/search/unified_search.py +24 -78
  112. hanzo_mcp/tools/shell/__init__.py +2 -2
  113. hanzo_mcp/tools/shell/auto_background.py +2 -6
  114. hanzo_mcp/tools/shell/base.py +1 -5
  115. hanzo_mcp/tools/shell/base_process.py +5 -7
  116. hanzo_mcp/tools/shell/bash_session.py +7 -24
  117. hanzo_mcp/tools/shell/bash_session_executor.py +5 -15
  118. hanzo_mcp/tools/shell/bash_tool.py +3 -7
  119. hanzo_mcp/tools/shell/command_executor.py +33 -86
  120. hanzo_mcp/tools/shell/logs.py +4 -16
  121. hanzo_mcp/tools/shell/npx.py +2 -8
  122. hanzo_mcp/tools/shell/npx_tool.py +1 -3
  123. hanzo_mcp/tools/shell/pkill.py +4 -12
  124. hanzo_mcp/tools/shell/process_tool.py +2 -8
  125. hanzo_mcp/tools/shell/processes.py +5 -17
  126. hanzo_mcp/tools/shell/run_background.py +1 -3
  127. hanzo_mcp/tools/shell/run_command.py +1 -3
  128. hanzo_mcp/tools/shell/run_command_windows.py +1 -3
  129. hanzo_mcp/tools/shell/session_manager.py +2 -6
  130. hanzo_mcp/tools/shell/session_storage.py +2 -6
  131. hanzo_mcp/tools/shell/streaming_command.py +7 -23
  132. hanzo_mcp/tools/shell/uvx.py +4 -14
  133. hanzo_mcp/tools/shell/uvx_background.py +2 -6
  134. hanzo_mcp/tools/shell/uvx_tool.py +1 -3
  135. hanzo_mcp/tools/shell/zsh_tool.py +12 -20
  136. hanzo_mcp/tools/todo/todo.py +1 -3
  137. hanzo_mcp/tools/todo/todo_read.py +3 -9
  138. hanzo_mcp/tools/todo/todo_write.py +6 -18
  139. hanzo_mcp/tools/vector/__init__.py +3 -9
  140. hanzo_mcp/tools/vector/ast_analyzer.py +6 -20
  141. hanzo_mcp/tools/vector/git_ingester.py +10 -30
  142. hanzo_mcp/tools/vector/index_tool.py +3 -9
  143. hanzo_mcp/tools/vector/infinity_store.py +7 -27
  144. hanzo_mcp/tools/vector/mock_infinity.py +1 -3
  145. hanzo_mcp/tools/vector/project_manager.py +4 -12
  146. hanzo_mcp/tools/vector/vector.py +2 -6
  147. hanzo_mcp/tools/vector/vector_index.py +8 -8
  148. hanzo_mcp/tools/vector/vector_search.py +7 -21
  149. {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/METADATA +2 -2
  150. hanzo_mcp-0.8.14.dist-info/RECORD +193 -0
  151. hanzo_mcp-0.8.11.dist-info/RECORD +0 -193
  152. {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/WHEEL +0 -0
  153. {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/entry_points.txt +0 -0
  154. {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/top_level.txt +0 -0
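
Hunks for the hanzo_mcp/tools/filesystem modules are reproduced below. Nearly every displayed hunk is a mechanical reflow: statements that had been wrapped across several lines to fit a narrow formatter limit are joined onto a single line, with no change in behavior. This is consistent with (though not confirmed by this diff alone as) a raised maximum line length in the project's formatter settings. A hypothetical before/after in the same style; find_symbols is an illustrative name, not a function from this package:

# Before: wrapped to satisfy a short line limit
def find_symbols(
    self, name: str, path: str
) -> list[str]:
    ...

# After: joined onto one line, as in the hunks below
def find_symbols(self, name: str, path: str) -> list[str]:
    ...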
hanzo_mcp/tools/filesystem/ast_multi_edit.py +14 -43

@@ -129,9 +129,7 @@ class ASTMultiEdit(BaseTool):
 
         return parser.parse(bytes(content, "utf-8"))
 
-    def _find_references(
-        self, symbol: str, file_path: str, project_root: Optional[str] = None
-    ) -> List[ASTMatch]:
+    def _find_references(self, symbol: str, file_path: str, project_root: Optional[str] = None) -> List[ASTMatch]:
         """Find all references to a symbol across the project."""
         matches = []
 
@@ -149,9 +147,7 @@ class ASTMultiEdit(BaseTool):
 
         return matches
 
-    def _get_reference_patterns(
-        self, symbol: str, file_path: str
-    ) -> List[Dict[str, Any]]:
+    def _get_reference_patterns(self, symbol: str, file_path: str) -> List[Dict[str, Any]]:
         """Get language-specific patterns for finding references."""
         ext = Path(file_path).suffix.lower()
         lang = self.languages.get(ext, "generic")
@@ -262,9 +258,7 @@ class ASTMultiEdit(BaseTool):
                     matches.extend(self._query_ast(tree, pattern, file_path, content))
                 else:
                     # Fallback to text search
-                    matches.extend(
-                        self._text_search(content, pattern["query"], file_path)
-                    )
+                    matches.extend(self._text_search(content, pattern["query"], file_path))
 
             except Exception:
                 continue
@@ -313,9 +307,7 @@ class ASTMultiEdit(BaseTool):
 
         return matches
 
-    def _get_parent_context(
-        self, node: tree_sitter.Node, content: str
-    ) -> Optional[str]:
+    def _get_parent_context(self, node: tree_sitter.Node, content: str) -> Optional[str]:
         """Get parent context for better understanding."""
         parent = node.parent
         if parent:
@@ -335,9 +327,7 @@ class ASTMultiEdit(BaseTool):
 
         return None
 
-    def _text_search(
-        self, content: str, pattern: str, file_path: str
-    ) -> List[ASTMatch]:
+    def _text_search(self, content: str, pattern: str, file_path: str) -> List[ASTMatch]:
         """Fallback text search."""
         matches = []
         lines = content.split("\n")
@@ -412,18 +402,14 @@ class ASTMultiEdit(BaseTool):
 
         return str(path.parent)
 
-    def _group_matches_by_file(
-        self, matches: List[ASTMatch]
-    ) -> Dict[str, List[ASTMatch]]:
+    def _group_matches_by_file(self, matches: List[ASTMatch]) -> Dict[str, List[ASTMatch]]:
         """Group matches by file for efficient editing."""
         grouped = defaultdict(list)
         for match in matches:
             grouped[match.file_path].append(match)
         return grouped
 
-    def _create_unique_context(
-        self, content: str, match: ASTMatch, context_lines: int
-    ) -> str:
+    def _create_unique_context(self, content: str, match: ASTMatch, context_lines: int) -> str:
         """Create unique context for edit identification."""
         lines = content.split("\n")
 
@@ -499,27 +485,20 @@ class ASTMultiEdit(BaseTool):
                 pattern = {"query": edit_op.old_string, "type": "text"}
                 matches = self._query_ast(tree, pattern, file_path, content)
             else:
-                matches = self._text_search(
-                    content, edit_op.old_string, file_path
-                )
+                matches = self._text_search(content, edit_op.old_string, file_path)
 
             # Filter by node types if specified
             if edit_op.node_types:
                 matches = [m for m in matches if m.node_type in edit_op.node_types]
 
             # Check expected count
-            if (
-                edit_op.expect_count is not None
-                and len(matches) != edit_op.expect_count
-            ):
+            if edit_op.expect_count is not None and len(matches) != edit_op.expect_count:
                 results["errors"].append(
                     {
                         "edit": edit_op.old_string,
                         "expected": edit_op.expect_count,
                         "found": len(matches),
-                        "locations": [
-                            f"{m.file_path}:{m.line_start}" for m in matches[:5]
-                        ],
+                        "locations": [f"{m.file_path}:{m.line_start}" for m in matches[:5]],
                     }
                 )
                 continue
@@ -547,9 +526,7 @@ class ASTMultiEdit(BaseTool):
                 success = await self._apply_file_changes(file_path, changes)
                 if success:
                     results["edits_applied"] += len(changes)
-                    results["changes"].append(
-                        {"file": file_path, "edits": len(changes)}
-                    )
+                    results["changes"].append({"file": file_path, "edits": len(changes)})
             except Exception as e:
                 results["errors"].append({"file": file_path, "error": str(e)})
 
@@ -564,9 +541,7 @@ class ASTMultiEdit(BaseTool):
             grouped[match.file_path].append((edit_op, match))
         return grouped
 
-    async def _apply_file_changes(
-        self, file_path: str, changes: List[Tuple[EditOperation, ASTMatch]]
-    ) -> bool:
+    async def _apply_file_changes(self, file_path: str, changes: List[Tuple[EditOperation, ASTMatch]]) -> bool:
         """Apply changes to a single file."""
         with open(file_path, "r", encoding="utf-8") as f:
             content = f.read()
@@ -600,9 +575,7 @@ class ASTMultiEdit(BaseTool):
 
         return True
 
-    def _generate_preview(
-        self, matches: List[Tuple[EditOperation, ASTMatch]], page_size: int
-    ) -> List[Dict[str, Any]]:
+    def _generate_preview(self, matches: List[Tuple[EditOperation, ASTMatch]], page_size: int) -> List[Dict[str, Any]]:
         """Generate preview of changes."""
         preview = []
 
@@ -625,9 +598,7 @@ class ASTMultiEdit(BaseTool):
 
         return preview
 
-    def _fallback_to_basic_edit(
-        self, file_path: str, edits: List[Dict[str, Any]]
-    ) -> MCPResourceDocument:
+    def _fallback_to_basic_edit(self, file_path: str, edits: List[Dict[str, Any]]) -> MCPResourceDocument:
         """Fallback to basic multi-edit when treesitter not available."""
        # Delegate to existing multi_edit tool
        from hanzo_mcp.tools.filesystem.multi_edit import MultiEdit
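
One behavioral detail visible in the ast_multi_edit.py hunks above: each edit is guarded by an optional expect_count, and when the actual match count disagrees, the tool records an error (with up to five match locations) instead of editing. A standalone sketch of that guard, using simplified stand-in types rather than the real EditOperation/ASTMatch:

from dataclasses import dataclass
from typing import Any, Optional

@dataclass
class Match:
    # Simplified stand-in for the ASTMatch type referenced above
    file_path: str
    line_start: int

def check_expected_count(
    old_string: str,
    expect_count: Optional[int],
    matches: list[Match],
    errors: list[dict[str, Any]],
) -> bool:
    """Mirror the expect_count guard above: False means skip this edit."""
    if expect_count is not None and len(matches) != expect_count:
        errors.append(
            {
                "edit": old_string,
                "expected": expect_count,
                "found": len(matches),
                "locations": [f"{m.file_path}:{m.line_start}" for m in matches[:5]],
            }
        )
        return False
    return True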
hanzo_mcp/tools/filesystem/base.py +4 -12

@@ -21,9 +21,7 @@ class FilesystemBaseTool(FileSystemTool, ABC):
     the base functionality in FileSystemTool.
     """
 
-    async def check_path_allowed(
-        self, path: str, tool_ctx: Any, error_prefix: str = "Error"
-    ) -> tuple[bool, str]:
+    async def check_path_allowed(self, path: str, tool_ctx: Any, error_prefix: str = "Error") -> tuple[bool, str]:
         """Check if a path is allowed and log an error if not.
 
         Args:
@@ -40,9 +38,7 @@ class FilesystemBaseTool(FileSystemTool, ABC):
             return False, f"{error_prefix}: {message}"
         return True, ""
 
-    async def check_path_exists(
-        self, path: str, tool_ctx: Any, error_prefix: str = "Error"
-    ) -> tuple[bool, str]:
+    async def check_path_exists(self, path: str, tool_ctx: Any, error_prefix: str = "Error") -> tuple[bool, str]:
         """Check if a path exists and log an error if not.
 
         Args:
@@ -60,9 +56,7 @@ class FilesystemBaseTool(FileSystemTool, ABC):
             return False, f"{error_prefix}: {message}"
         return True, ""
 
-    async def check_is_file(
-        self, path: str, tool_ctx: Any, error_prefix: str = "Error"
-    ) -> tuple[bool, str]:
+    async def check_is_file(self, path: str, tool_ctx: Any, error_prefix: str = "Error") -> tuple[bool, str]:
         """Check if a path is a file and log an error if not.
 
         Args:
@@ -80,9 +74,7 @@ class FilesystemBaseTool(FileSystemTool, ABC):
             return False, f"{error_prefix}: {message}"
         return True, ""
 
-    async def check_is_directory(
-        self, path: str, tool_ctx: Any, error_prefix: str = "Error"
-    ) -> tuple[bool, str]:
+    async def check_is_directory(self, path: str, tool_ctx: Any, error_prefix: str = "Error") -> tuple[bool, str]:
         """Check if a path is a directory and log an error if not.
 
         Args:
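
The four check_* helpers in the base.py hunks above share one calling convention: each returns an (ok, error_message) tuple and logs through the tool context, so callers can short-circuit on the first failure (the directory_tree.py hunks below do exactly this with check_path_allowed). A minimal sketch of chaining them; tool stands for any FilesystemBaseTool subclass and tool_ctx for the per-call context object, both typed loosely here since their concrete types are not shown in this diff:

from typing import Any

async def validate_file_path(tool: Any, tool_ctx: Any, path: str) -> str | None:
    """Return an error string on the first failed check, None when the path is usable."""
    allowed, error = await tool.check_path_allowed(path, tool_ctx)
    if not allowed:
        return error
    exists, error = await tool.check_path_exists(path, tool_ctx)
    if not exists:
        return error
    is_file, error = await tool.check_is_file(path, tool_ctx, error_prefix="Read error")
    if not is_file:
        return error
    return None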
hanzo_mcp/tools/filesystem/batch_search.py +35 -115

@@ -80,10 +80,7 @@ class BatchSearchResults:
         return {
             "query": self.query,
             "total_results": self.total_results,
-            "results_by_type": {
-                k.value: [r.to_dict() for r in v]
-                for k, v in self.results_by_type.items()
-            },
+            "results_by_type": {k.value: [r.to_dict() for r in v] for k, v in self.results_by_type.items()},
             "combined_results": [r.to_dict() for r in self.combined_results],
             "search_time_ms": self.search_time_ms,
         }
@@ -96,12 +93,8 @@ Queries = Annotated[
 SearchPath = Annotated[str, Field(description="Path to search in", default=".")]
 Include = Annotated[str, Field(description="File pattern to include", default="*")]
 MaxResults = Annotated[int, Field(description="Maximum results per query", default=20)]
-IncludeContext = Annotated[
-    bool, Field(description="Include function/method context", default=True)
-]
-CombineResults = Annotated[
-    bool, Field(description="Combine and deduplicate results", default=True)
-]
+IncludeContext = Annotated[bool, Field(description="Include function/method context", default=True)]
+CombineResults = Annotated[bool, Field(description="Combine and deduplicate results", default=True)]
 
 
 class BatchSearchParams(TypedDict):
@@ -212,9 +205,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
 
         # If pattern contains natural language, prioritize vector search
         words = pattern.split()
-        if len(words) > 2 and not any(
-            c in pattern for c in ["(", ")", "{", "}", "[", "]"]
-        ):
+        if len(words) > 2 and not any(c in pattern for c in ["(", ")", "{", "}", "[", "]"]):
             use_vector = True
 
         return use_vector, use_ast, use_symbol
@@ -227,9 +218,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
 
         try:
             # Use the existing grep tool
-            grep_result = await self.grep_tool.call(
-                tool_ctx.mcp_context, pattern=pattern, path=path, include=include
-            )
+            grep_result = await self.grep_tool.call(tool_ctx.mcp_context, pattern=pattern, path=path, include=include)
 
             results = []
             if "Found" in grep_result and "matches" in grep_result:
@@ -265,9 +254,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
             await tool_ctx.error(f"Grep search failed: {str(e)}")
             return []
 
-    async def _run_vector_search(
-        self, pattern: str, path: str, tool_ctx, max_results: int
-    ) -> List[SearchResult]:
+    async def _run_vector_search(self, pattern: str, path: str, tool_ctx, max_results: int) -> List[SearchResult]:
         """Run vector search and convert results."""
         if not self.vector_tool:
             return []
@@ -373,11 +360,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
                             content=content,
                             search_type=SearchType.AST,
                             score=0.9,  # High score for AST matches
-                            context=(
-                                "\n".join(context_lines)
-                                if context_lines
-                                else None
-                            ),
+                            context=("\n".join(context_lines) if context_lines else None),
                         )
                         results.append(result)
 
@@ -396,9 +379,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
             await tool_ctx.error(f"AST search failed: {str(e)}")
             return []
 
-    async def _run_symbol_search(
-        self, pattern: str, path: str, tool_ctx, max_results: int
-    ) -> List[SearchResult]:
+    async def _run_symbol_search(self, pattern: str, path: str, tool_ctx, max_results: int) -> List[SearchResult]:
         """Run symbol search using AST analysis."""
         await tool_ctx.info(f"Running symbol search for: {pattern}")
 
@@ -414,9 +395,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
             # Look for source files
             for ext in [".py", ".js", ".ts", ".java", ".cpp", ".c"]:
                 files_to_check.extend(path_obj.rglob(f"*{ext}"))
-            files_to_check = [
-                str(f) for f in files_to_check[:50]
-            ]  # Limit for performance
+            files_to_check = [str(f) for f in files_to_check[:50]]  # Limit for performance
 
         # Analyze files for symbols
         for file_path in files_to_check:
@@ -439,11 +418,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
                         file_path=symbol.file_path,
                         line_number=symbol.line_start,
                         content=f"{symbol.type} {symbol.name}"
-                        + (
-                            f" - {symbol.docstring[:100]}..."
-                            if symbol.docstring
-                            else ""
-                        ),
+                        + (f" - {symbol.docstring[:100]}..." if symbol.docstring else ""),
                         search_type=SearchType.SYMBOL,
                         score=0.95,  # Very high score for symbol matches
                         symbol_info=symbol,
@@ -464,9 +439,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
             await tool_ctx.error(f"Symbol search failed: {str(e)}")
             return []
 
-    async def _add_function_context(
-        self, results: List[SearchResult], tool_ctx
-    ) -> List[SearchResult]:
+    async def _add_function_context(self, results: List[SearchResult], tool_ctx) -> List[SearchResult]:
         """Add function/method context to results where relevant."""
         enhanced_results = []
 
@@ -488,16 +461,10 @@ Perfect for comprehensive code analysis and refactoring tasks."""
                 if file_ast:
                     # Find symbol containing this line
                     for symbol in file_ast.symbols:
-                        if (
-                            symbol.line_start
-                            <= result.line_number
-                            <= symbol.line_end
-                            and symbol.type
-                            in [
-                                "function",
-                                "method",
-                            ]
-                        ):
+                        if symbol.line_start <= result.line_number <= symbol.line_end and symbol.type in [
+                            "function",
+                            "method",
+                        ]:
                             enhanced_result = SearchResult(
                                 file_path=result.file_path,
                                 line_number=result.line_number,
@@ -510,17 +477,13 @@ Perfect for comprehensive code analysis and refactoring tasks."""
                             )
                             break
             except Exception as e:
-                await tool_ctx.warning(
-                    f"Could not add context for {result.file_path}: {str(e)}"
-                )
+                await tool_ctx.warning(f"Could not add context for {result.file_path}: {str(e)}")
 
             enhanced_results.append(enhanced_result)
 
         return enhanced_results
 
-    def _combine_and_rank_results(
-        self, results_by_type: Dict[SearchType, List[SearchResult]]
-    ) -> List[SearchResult]:
+    def _combine_and_rank_results(self, results_by_type: Dict[SearchType, List[SearchResult]]) -> List[SearchResult]:
         """Combine results from different search types and rank by relevance."""
         all_results = []
         seen_combinations = set()
@@ -552,8 +515,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
 
                 # Replace existing if: higher priority type, or same priority but higher score
                 if result_priority > existing_priority or (
-                    result_priority == existing_priority
-                    and result.score > existing.score
+                    result_priority == existing_priority and result.score > existing.score
                 ):
                     # Replace the entire result to preserve type
                     idx = all_results.index(existing)
@@ -561,9 +523,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
                 else:
                     # Still merge useful information
                     existing.context = existing.context or result.context
-                    existing.symbol_info = (
-                        existing.symbol_info or result.symbol_info
-                    )
+                    existing.symbol_info = existing.symbol_info or result.symbol_info
                     break
 
         # Sort by score (descending) then by search type priority
@@ -574,9 +534,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
             SearchType.VECTOR: 1,
         }
 
-        all_results.sort(
-            key=lambda r: (r.score, type_priority[r.search_type]), reverse=True
-        )
+        all_results.sort(key=lambda r: (r.score, type_priority[r.search_type]), reverse=True)
 
         return all_results
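
The _combine_and_rank_results hunks above amount to dedup-plus-ranking: one result per (file, line) location, a higher-priority search type replaces a lower one on collision (same priority falls back to score), and the final ordering is by (score, type priority) descending. A self-contained sketch of that scheme with simplified field names; note the visible hunk only shows VECTOR: 1, so the other priority values here are assumptions:

from dataclasses import dataclass

# Assumed priorities; only "vector": 1 is visible in the diff above
TYPE_PRIORITY = {"symbol": 4, "ast": 3, "grep": 2, "vector": 1}

@dataclass
class Result:
    # Simplified stand-in for the SearchResult type in the hunks above
    file_path: str
    line_number: int
    search_type: str
    score: float

def combine_and_rank(results: list[Result]) -> list[Result]:
    best: dict[tuple[str, int], Result] = {}
    for r in results:
        key = (r.file_path, r.line_number)
        cur = best.get(key)
        # Tuple comparison: higher-priority type wins; same priority, higher score wins
        if cur is None or (TYPE_PRIORITY[r.search_type], r.score) > (
            TYPE_PRIORITY[cur.search_type], cur.score
        ):
            best[key] = r
    return sorted(
        best.values(),
        key=lambda r: (r.score, TYPE_PRIORITY[r.search_type]),
        reverse=True,
    )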
 
@@ -612,9 +570,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
         if not exists:
             return error_msg
 
-        await tool_ctx.info(
-            f"Starting batch search with {len(queries)} queries in {path}"
-        )
+        await tool_ctx.info(f"Starting batch search with {len(queries)} queries in {path}")
 
         # Run all queries in parallel
         search_tasks = []
@@ -627,44 +583,26 @@ Perfect for comprehensive code analysis and refactoring tasks."""
             if query_type == "grep":
                 pattern = query.get("pattern")
                 if pattern:
-                    search_tasks.append(
-                        self._run_grep_search(
-                            pattern, path, include, tool_ctx, max_results
-                        )
-                    )
+                    search_tasks.append(self._run_grep_search(pattern, path, include, tool_ctx, max_results))
 
             elif query_type == "grep_ast":
                 pattern = query.get("pattern")
                 if pattern:
-                    search_tasks.append(
-                        self._run_ast_search(
-                            pattern, path, include, tool_ctx, max_results
-                        )
-                    )
+                    search_tasks.append(self._run_ast_search(pattern, path, include, tool_ctx, max_results))
 
             elif query_type == "vector_search" and self.vector_tool:
                 search_query = query.get("query") or query.get("pattern")
                 if search_query:
-                    search_tasks.append(
-                        self._run_vector_search(
-                            search_query, path, tool_ctx, max_results
-                        )
-                    )
+                    search_tasks.append(self._run_vector_search(search_query, path, tool_ctx, max_results))
 
             elif query_type == "git_search":
                 pattern = query.get("pattern")
                 search_type = query.get("search_type", "content")
                 if pattern:
-                    search_tasks.append(
-                        self._run_git_search(
-                            pattern, path, search_type, tool_ctx, max_results
-                        )
-                    )
+                    search_tasks.append(self._run_git_search(pattern, path, search_type, tool_ctx, max_results))
 
             else:
-                await tool_ctx.warning(
-                    f"Unknown or unavailable search type: {query_type}"
-                )
+                await tool_ctx.warning(f"Unknown or unavailable search type: {query_type}")
 
         # Execute all searches in parallel
         search_results = await asyncio.gather(*search_tasks, return_exceptions=True)
@@ -689,9 +627,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
 
         # Add context if requested
         if include_context:
-            combined_results = await self._add_context_to_results(
-                combined_results, tool_ctx
-            )
+            combined_results = await self._add_context_to_results(combined_results, tool_ctx)
 
         end_time = time.time()
         search_time_ms = (end_time - start_time) * 1000
@@ -700,27 +636,17 @@ Perfect for comprehensive code analysis and refactoring tasks."""
         combined_results.sort(key=lambda r: r.score, reverse=True)
 
         # Limit total results
-        combined_results = combined_results[
-            : max_results * 2
-        ]  # Allow more when combining
+        combined_results = combined_results[: max_results * 2]  # Allow more when combining
 
         # Create batch results object
         batch_results = BatchSearchResults(
             query=f"Batch search with {len(queries)} queries",
             total_results=len(combined_results),
             results_by_type={
-                SearchType.GREP: [
-                    r for r in combined_results if r.search_type == SearchType.GREP
-                ],
-                SearchType.VECTOR: [
-                    r for r in combined_results if r.search_type == SearchType.VECTOR
-                ],
-                SearchType.AST: [
-                    r for r in combined_results if r.search_type == SearchType.AST
-                ],
-                SearchType.GIT: [
-                    r for r in combined_results if r.search_type == SearchType.GIT
-                ],
+                SearchType.GREP: [r for r in combined_results if r.search_type == SearchType.GREP],
+                SearchType.VECTOR: [r for r in combined_results if r.search_type == SearchType.VECTOR],
+                SearchType.AST: [r for r in combined_results if r.search_type == SearchType.AST],
+                SearchType.GIT: [r for r in combined_results if r.search_type == SearchType.GIT],
             },
             combined_results=combined_results,
             search_time_ms=search_time_ms,
@@ -802,16 +728,12 @@ Perfect for comprehensive code analysis and refactoring tasks."""
 
         return combined
 
-    async def _add_context_to_results(
-        self, results: List[SearchResult], tool_ctx
-    ) -> List[SearchResult]:
+    async def _add_context_to_results(self, results: List[SearchResult], tool_ctx) -> List[SearchResult]:
         """Add function/method context to results."""
         # This is a simplified version - you could enhance with full AST context
         return await self._add_function_context(results, tool_ctx)
 
-    def _format_batch_results(
-        self, results: BatchSearchResults, query_info: List[Dict]
-    ) -> str:
+    def _format_batch_results(self, results: BatchSearchResults, query_info: List[Dict]) -> str:
         """Format batch search results for display."""
         output = []
 
@@ -859,9 +781,7 @@ Perfect for comprehensive code analysis and refactoring tasks."""
             score_str = f"[{result.search_type.value} {result.score:.2f}]"
 
             if result.line_number:
-                output.append(
-                    f" {result.line_number:>4}: {score_str} {result.content}"
-                )
+                output.append(f" {result.line_number:>4}: {score_str} {result.content}")
             else:
                 output.append(f" {score_str} {result.content}")
 
hanzo_mcp/tools/filesystem/content_replace.py +4 -12

@@ -159,15 +159,11 @@ Only works within allowed directories."""
         # Process based on whether path is a file or directory
         if input_path.is_file():
             # Single file search
-            if file_pattern == "*" or fnmatch.fnmatch(
-                input_path.name, file_pattern
-            ):
+            if file_pattern == "*" or fnmatch.fnmatch(input_path.name, file_pattern):
                 matching_files.append(input_path)
                 await tool_ctx.info(f"Searching single file: {path}")
             else:
-                await tool_ctx.info(
-                    f"File does not match pattern '{file_pattern}': {path}"
-                )
+                await tool_ctx.info(f"File does not match pattern '{file_pattern}': {path}")
                 return f"File does not match pattern '{file_pattern}': {path}"
         elif input_path.is_dir():
             # Directory search - optimized file finding
@@ -186,9 +182,7 @@ Only works within allowed directories."""
                 for entry in input_path.rglob("*"):
                     entry_path = str(entry)
                     if entry_path in allowed_paths and entry.is_file():
-                        if file_pattern == "*" or fnmatch.fnmatch(
-                            entry.name, file_pattern
-                        ):
+                        if file_pattern == "*" or fnmatch.fnmatch(entry.name, file_pattern):
                             matching_files.append(entry)
 
         await tool_ctx.info(f"Found {len(matching_files)} matching files")
@@ -251,9 +245,7 @@ Only works within allowed directories."""
             )
             message = f"Dry run: {replacements_made} replacements of '{pattern}' with '{replacement}' would be made in {files_modified} files:"
         else:
-            await tool_ctx.info(
-                f"Made {replacements_made} replacements in {files_modified} files"
-            )
+            await tool_ctx.info(f"Made {replacements_made} replacements in {files_modified} files")
             message = f"Made {replacements_made} replacements of '{pattern}' with '{replacement}' in {files_modified} files:"
 
         return message + "\n\n" + "\n".join(results)
hanzo_mcp/tools/filesystem/diff.py +2 -10

@@ -166,16 +166,8 @@ diff a.json b.json --ignore-whitespace"""
         output.extend(diff_lines)
 
         # Add summary
-        additions = sum(
-            1
-            for line in diff_lines
-            if line.startswith("+") and not line.startswith("+++")
-        )
-        deletions = sum(
-            1
-            for line in diff_lines
-            if line.startswith("-") and not line.startswith("---")
-        )
+        additions = sum(1 for line in diff_lines if line.startswith("+") and not line.startswith("+++"))
+        deletions = sum(1 for line in diff_lines if line.startswith("-") and not line.startswith("---"))
 
         output.append("")
         output.append("=" * 60)
hanzo_mcp/tools/filesystem/directory_tree.py +9 -27

@@ -112,9 +112,7 @@ requested. Only works within allowed directories."""
             await tool_ctx.error(path_validation.error_message)
             return f"Error: {path_validation.error_message}"
 
-        await tool_ctx.info(
-            f"Getting directory tree: {path} (depth: {depth}, include_filtered: {include_filtered})"
-        )
+        await tool_ctx.info(f"Getting directory tree: {path} (depth: {depth}, include_filtered: {include_filtered})")
 
         # Check if path is allowed
         allowed, error_msg = await self.check_path_allowed(path, tool_ctx)
@@ -153,9 +151,7 @@ requested. Only works within allowed directories."""
         }
 
         # Log filtering settings
-        await tool_ctx.info(
-            f"Directory tree filtering: include_filtered={include_filtered}"
-        )
+        await tool_ctx.info(f"Directory tree filtering: include_filtered={include_filtered}")
 
         # Check if a directory should be filtered
         def should_filter(current_path: Path) -> bool:
@@ -165,9 +161,7 @@ requested. Only works within allowed directories."""
                 return False
 
             # Filter based on directory name if filtering is enabled
-            return (
-                current_path.name in FILTERED_DIRECTORIES and not include_filtered
-            )
+            return current_path.name in FILTERED_DIRECTORIES and not include_filtered
 
         # Track stats for summary
         stats = {
@@ -178,9 +172,7 @@ requested. Only works within allowed directories."""
         }
 
         # Build the tree recursively
-        async def build_tree(
-            current_path: Path, current_depth: int = 0
-        ) -> list[dict[str, Any]]:
+        async def build_tree(current_path: Path, current_depth: int = 0) -> list[dict[str, Any]]:
             result: list[dict[str, Any]] = []
 
             # Skip processing if path isn't allowed
@@ -189,9 +181,7 @@ requested. Only works within allowed directories."""
 
             try:
                 # Sort entries: directories first, then files alphabetically
-                entries = sorted(
-                    current_path.iterdir(), key=lambda x: (not x.is_dir(), x.name)
-                )
+                entries = sorted(current_path.iterdir(), key=lambda x: (not x.is_dir(), x.name))
 
                 for entry in entries:
                     # Skip entries that aren't allowed
@@ -220,9 +210,7 @@ requested. Only works within allowed directories."""
                             continue
 
                         # Process children recursively with depth increment
-                        entry_data["children"] = await build_tree(
-                            entry, current_depth + 1
-                        )
+                        entry_data["children"] = await build_tree(entry, current_depth + 1)
                         result.append(entry_data)
                     else:
                         # Files should be at the same level check as directories
@@ -237,9 +225,7 @@ requested. Only works within allowed directories."""
             return result
 
         # Format the tree as a simple indented structure
-        def format_tree(
-            tree_data: list[dict[str, Any]], level: int = 0
-        ) -> list[str]:
+        def format_tree(tree_data: list[dict[str, Any]], level: int = 0) -> list[str]:
             lines = []
 
             for item in tree_data:
@@ -249,9 +235,7 @@ requested. Only works within allowed directories."""
                 # Format based on type
                 if item["type"] == "directory":
                     if "skipped" in item:
-                        lines.append(
-                            f"{indent}{item['name']}/ [skipped - {item['skipped']}]"
-                        )
+                        lines.append(f"{indent}{item['name']}/ [skipped - {item['skipped']}]")
                     else:
                         lines.append(f"{indent}{item['name']}/")
                         # Add children with increased indentation if present
@@ -310,6 +294,4 @@ requested. Only works within allowed directories."""
             depth: Depth = 3,
             include_filtered: IncludeFiltered = False,
         ) -> str:
-            return await tool_self.call(
-                ctx, path=path, depth=depth, include_filtered=include_filtered
-            )
+            return await tool_self.call(ctx, path=path, depth=depth, include_filtered=include_filtered)