hanzo-mcp 0.6.13__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hanzo-mcp might be problematic; see the registry advisory for details.

Files changed (62)
  1. hanzo_mcp/analytics/__init__.py +5 -0
  2. hanzo_mcp/analytics/posthog_analytics.py +364 -0
  3. hanzo_mcp/cli.py +3 -3
  4. hanzo_mcp/cli_enhanced.py +3 -3
  5. hanzo_mcp/config/settings.py +1 -1
  6. hanzo_mcp/config/tool_config.py +18 -4
  7. hanzo_mcp/server.py +34 -1
  8. hanzo_mcp/tools/__init__.py +65 -2
  9. hanzo_mcp/tools/agent/__init__.py +84 -3
  10. hanzo_mcp/tools/agent/agent_tool.py +102 -4
  11. hanzo_mcp/tools/agent/agent_tool_v2.py +459 -0
  12. hanzo_mcp/tools/agent/clarification_protocol.py +220 -0
  13. hanzo_mcp/tools/agent/clarification_tool.py +68 -0
  14. hanzo_mcp/tools/agent/claude_cli_tool.py +125 -0
  15. hanzo_mcp/tools/agent/claude_desktop_auth.py +508 -0
  16. hanzo_mcp/tools/agent/cli_agent_base.py +191 -0
  17. hanzo_mcp/tools/agent/code_auth.py +436 -0
  18. hanzo_mcp/tools/agent/code_auth_tool.py +194 -0
  19. hanzo_mcp/tools/agent/codex_cli_tool.py +123 -0
  20. hanzo_mcp/tools/agent/critic_tool.py +376 -0
  21. hanzo_mcp/tools/agent/gemini_cli_tool.py +128 -0
  22. hanzo_mcp/tools/agent/grok_cli_tool.py +128 -0
  23. hanzo_mcp/tools/agent/iching_tool.py +380 -0
  24. hanzo_mcp/tools/agent/network_tool.py +273 -0
  25. hanzo_mcp/tools/agent/prompt.py +62 -20
  26. hanzo_mcp/tools/agent/review_tool.py +433 -0
  27. hanzo_mcp/tools/agent/swarm_tool.py +535 -0
  28. hanzo_mcp/tools/agent/swarm_tool_v2.py +594 -0
  29. hanzo_mcp/tools/common/base.py +1 -0
  30. hanzo_mcp/tools/common/batch_tool.py +102 -10
  31. hanzo_mcp/tools/common/fastmcp_pagination.py +369 -0
  32. hanzo_mcp/tools/common/forgiving_edit.py +243 -0
  33. hanzo_mcp/tools/common/paginated_base.py +230 -0
  34. hanzo_mcp/tools/common/paginated_response.py +307 -0
  35. hanzo_mcp/tools/common/pagination.py +226 -0
  36. hanzo_mcp/tools/common/tool_list.py +3 -0
  37. hanzo_mcp/tools/common/truncate.py +101 -0
  38. hanzo_mcp/tools/filesystem/__init__.py +29 -0
  39. hanzo_mcp/tools/filesystem/ast_multi_edit.py +562 -0
  40. hanzo_mcp/tools/filesystem/directory_tree_paginated.py +338 -0
  41. hanzo_mcp/tools/lsp/__init__.py +5 -0
  42. hanzo_mcp/tools/lsp/lsp_tool.py +512 -0
  43. hanzo_mcp/tools/memory/__init__.py +76 -0
  44. hanzo_mcp/tools/memory/knowledge_tools.py +518 -0
  45. hanzo_mcp/tools/memory/memory_tools.py +456 -0
  46. hanzo_mcp/tools/search/__init__.py +6 -0
  47. hanzo_mcp/tools/search/find_tool.py +581 -0
  48. hanzo_mcp/tools/search/unified_search.py +953 -0
  49. hanzo_mcp/tools/shell/__init__.py +5 -0
  50. hanzo_mcp/tools/shell/auto_background.py +203 -0
  51. hanzo_mcp/tools/shell/base_process.py +53 -27
  52. hanzo_mcp/tools/shell/bash_tool.py +17 -33
  53. hanzo_mcp/tools/shell/npx_tool.py +15 -32
  54. hanzo_mcp/tools/shell/streaming_command.py +594 -0
  55. hanzo_mcp/tools/shell/uvx_tool.py +15 -32
  56. hanzo_mcp/types.py +23 -0
  57. {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.0.dist-info}/METADATA +228 -71
  58. {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.0.dist-info}/RECORD +61 -24
  59. hanzo_mcp-0.6.13.dist-info/licenses/LICENSE +0 -21
  60. {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.0.dist-info}/WHEEL +0 -0
  61. {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.0.dist-info}/entry_points.txt +0 -0
  62. {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,581 @@
1
+ """Fast file finding tool using ffind and intelligent caching."""
2
+
3
+ import os
4
+ import time
5
+ import fnmatch
6
+ from typing import List, Optional, Dict, Any, Set
7
+ from pathlib import Path
8
+ from dataclasses import dataclass
9
+ import subprocess
10
+ import json
11
+ from datetime import datetime
12
+
13
+ from hanzo_mcp.tools.common.base import BaseTool
14
+ from hanzo_mcp.tools.common.paginated_response import AutoPaginatedResponse
15
+ from hanzo_mcp.tools.common.decorators import with_context_normalization
16
+ from hanzo_mcp.types import MCPResourceDocument
17
+
18
+ try:
19
+ import ffind
20
+ FFIND_AVAILABLE = True
21
+ except ImportError:
22
+ FFIND_AVAILABLE = False
23
+
24
+
25
@dataclass
class FileMatch:
    """A single filesystem entry discovered by the find tool."""

    path: str        # full path to the entry
    name: str        # basename of the entry
    size: int        # size in bytes (0 for directories)
    modified: float  # mtime as a POSIX timestamp
    is_dir: bool     # True when the entry is a directory
    extension: str   # file suffix including the dot, "" when none
    depth: int       # number of path components below the search root

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this match to a JSON-friendly dictionary.

        The ``modified`` timestamp is rendered as an ISO-8601 string.
        """
        modified_iso = datetime.fromtimestamp(self.modified).isoformat()
        return dict(
            path=self.path,
            name=self.name,
            size=self.size,
            modified=modified_iso,
            is_dir=self.is_dir,
            extension=self.extension,
            depth=self.depth,
        )
46
+
47
+
48
class FindTool(BaseTool):
    """Fast file and directory discovery.

    Locates files and directories by name, glob/regex/fuzzy pattern, or
    attributes (size, modification time, type). When the optional ``ffind``
    package is importable, discovery is delegated to it for speed; otherwise
    an optimized pure-Python walk is used. Results honour ``.gitignore``
    patterns by default, and per-root pattern caches speed up repeated
    searches.
    """

    name = "find"
    description = """Find files and directories by name, pattern, or attributes.
    
    Examples:
    - find("*.py") - Find all Python files
    - find("test_", type="file") - Find files starting with test_
    - find("src", type="dir") - Find directories named src
    - find("TODO", in_content=True) - Find files containing TODO
    - find("large", min_size="10MB") - Find files larger than 10MB
    - find("recent", modified_after="1 day ago") - Recently modified files
    
    This is the primary tool for discovering files in a project. Use it before
    reading or searching within files.
    """
78
+
79
+ def __init__(self):
80
+ super().__init__()
81
+ self._cache = {}
82
+ self._gitignore_cache = {}
83
+
84
+ def _parse_size(self, size_str: str) -> int:
85
+ """Parse human-readable size to bytes."""
86
+ # Order matters - check longer units first
87
+ units = [
88
+ ('TB', 1024**4), ('GB', 1024**3), ('MB', 1024**2), ('KB', 1024),
89
+ ('T', 1024**4), ('G', 1024**3), ('M', 1024**2), ('K', 1024), ('B', 1)
90
+ ]
91
+
92
+ size_str = size_str.upper().strip()
93
+ for unit, multiplier in units:
94
+ if size_str.endswith(unit):
95
+ num_str = size_str[:-len(unit)].strip()
96
+ if num_str:
97
+ try:
98
+ return int(float(num_str) * multiplier)
99
+ except ValueError:
100
+ return 0
101
+
102
+ try:
103
+ return int(size_str)
104
+ except ValueError:
105
+ return 0
106
+
107
+ def _parse_time(self, time_str: str) -> float:
108
+ """Parse human-readable time to timestamp."""
109
+ import re
110
+ from datetime import datetime, timedelta
111
+
112
+ # Handle relative times like "1 day ago", "2 hours ago"
113
+ match = re.match(r'(\d+)\s*(second|minute|hour|day|week|month|year)s?\s*ago', time_str.lower())
114
+ if match:
115
+ amount = int(match.group(1))
116
+ unit = match.group(2)
117
+
118
+ if unit == 'second':
119
+ delta = timedelta(seconds=amount)
120
+ elif unit == 'minute':
121
+ delta = timedelta(minutes=amount)
122
+ elif unit == 'hour':
123
+ delta = timedelta(hours=amount)
124
+ elif unit == 'day':
125
+ delta = timedelta(days=amount)
126
+ elif unit == 'week':
127
+ delta = timedelta(weeks=amount)
128
+ elif unit == 'month':
129
+ delta = timedelta(days=amount * 30) # Approximate
130
+ elif unit == 'year':
131
+ delta = timedelta(days=amount * 365) # Approximate
132
+
133
+ return (datetime.now() - delta).timestamp()
134
+
135
+ # Try parsing as date
136
+ try:
137
+ return datetime.fromisoformat(time_str).timestamp()
138
+ except:
139
+ return datetime.now().timestamp()
140
+
141
+ def _load_gitignore(self, root: str) -> Set[str]:
142
+ """Load and parse .gitignore patterns."""
143
+ if root in self._gitignore_cache:
144
+ return self._gitignore_cache[root]
145
+
146
+ patterns = set()
147
+ gitignore_path = Path(root) / '.gitignore'
148
+
149
+ if gitignore_path.exists():
150
+ try:
151
+ with open(gitignore_path, 'r') as f:
152
+ for line in f:
153
+ line = line.strip()
154
+ if line and not line.startswith('#'):
155
+ patterns.add(line)
156
+ except:
157
+ pass
158
+
159
+ # Add common ignore patterns
160
+ patterns.update([
161
+ '*.pyc', '__pycache__', '.git', '.svn', '.hg',
162
+ 'node_modules', '.env', '.venv', 'venv',
163
+ '*.swp', '*.swo', '.DS_Store', 'Thumbs.db'
164
+ ])
165
+
166
+ self._gitignore_cache[root] = patterns
167
+ return patterns
168
+
169
+ def _should_ignore(self, path: str, ignore_patterns: Set[str]) -> bool:
170
+ """Check if path should be ignored."""
171
+ path_obj = Path(path)
172
+
173
+ for pattern in ignore_patterns:
174
+ # Check against full path and basename
175
+ if fnmatch.fnmatch(path_obj.name, pattern):
176
+ return True
177
+ if fnmatch.fnmatch(str(path_obj), pattern):
178
+ return True
179
+
180
+ # Check if any parent directory matches
181
+ for parent in path_obj.parents:
182
+ if fnmatch.fnmatch(parent.name, pattern):
183
+ return True
184
+
185
+ return False
186
+
187
+ async def run(self,
188
+ pattern: str = "*",
189
+ path: str = ".",
190
+ type: Optional[str] = None, # "file", "dir", "any"
191
+ min_size: Optional[str] = None,
192
+ max_size: Optional[str] = None,
193
+ modified_after: Optional[str] = None,
194
+ modified_before: Optional[str] = None,
195
+ max_depth: Optional[int] = None,
196
+ case_sensitive: bool = False,
197
+ regex: bool = False,
198
+ fuzzy: bool = False,
199
+ in_content: bool = False,
200
+ follow_symlinks: bool = False,
201
+ respect_gitignore: bool = True,
202
+ max_results: int = 1000,
203
+ sort_by: str = "path", # "path", "name", "size", "modified"
204
+ reverse: bool = False,
205
+ page_size: int = 100,
206
+ page: int = 1,
207
+ **kwargs) -> MCPResourceDocument:
208
+ """Find files and directories.
209
+
210
+ Args:
211
+ pattern: Search pattern (glob by default, regex if regex=True)
212
+ path: Root directory to search from
213
+ type: Filter by type ("file", "dir", "any")
214
+ min_size: Minimum file size (e.g., "1MB", "500K")
215
+ max_size: Maximum file size
216
+ modified_after: Find files modified after this time
217
+ modified_before: Find files modified before this time
218
+ max_depth: Maximum directory depth to search
219
+ case_sensitive: Case-sensitive matching
220
+ regex: Treat pattern as regex instead of glob
221
+ fuzzy: Use fuzzy matching for pattern
222
+ in_content: Search for pattern inside files (slower)
223
+ follow_symlinks: Follow symbolic links
224
+ respect_gitignore: Respect .gitignore patterns
225
+ max_results: Maximum results to return
226
+ sort_by: Sort results by attribute
227
+ reverse: Reverse sort order
228
+ page_size: Results per page
229
+ page: Page number
230
+ """
231
+
232
+ start_time = time.time()
233
+
234
+ # Resolve path
235
+ root_path = Path(path).resolve()
236
+ if not root_path.exists():
237
+ return MCPResourceDocument(data={
238
+ "error": f"Path does not exist: {path}",
239
+ "results": []
240
+ })
241
+
242
+ # Get ignore patterns
243
+ ignore_patterns = set()
244
+ if respect_gitignore:
245
+ ignore_patterns = self._load_gitignore(str(root_path))
246
+
247
+ # Parse filters
248
+ min_size_bytes = self._parse_size(min_size) if min_size else None
249
+ max_size_bytes = self._parse_size(max_size) if max_size else None
250
+ modified_after_ts = self._parse_time(modified_after) if modified_after else None
251
+ modified_before_ts = self._parse_time(modified_before) if modified_before else None
252
+
253
+ # Collect matches
254
+ matches = []
255
+
256
+ if FFIND_AVAILABLE and not in_content:
257
+ # Use ffind for fast file discovery
258
+ matches = await self._find_with_ffind(
259
+ pattern, root_path, type, case_sensitive, regex,
260
+ max_depth, follow_symlinks, ignore_patterns
261
+ )
262
+ else:
263
+ # Fall back to Python implementation
264
+ matches = await self._find_with_python(
265
+ pattern, root_path, type, case_sensitive, regex, fuzzy,
266
+ in_content, max_depth, follow_symlinks, respect_gitignore, ignore_patterns
267
+ )
268
+
269
+ # Apply filters
270
+ filtered_matches = []
271
+ for match in matches:
272
+ # Size filters
273
+ if min_size_bytes and match.size < min_size_bytes:
274
+ continue
275
+ if max_size_bytes and match.size > max_size_bytes:
276
+ continue
277
+
278
+ # Time filters
279
+ if modified_after_ts and match.modified < modified_after_ts:
280
+ continue
281
+ if modified_before_ts and match.modified > modified_before_ts:
282
+ continue
283
+
284
+ filtered_matches.append(match)
285
+
286
+ if len(filtered_matches) >= max_results:
287
+ break
288
+
289
+ # Sort results
290
+ if sort_by == "name":
291
+ filtered_matches.sort(key=lambda m: m.name, reverse=reverse)
292
+ elif sort_by == "size":
293
+ filtered_matches.sort(key=lambda m: m.size, reverse=reverse)
294
+ elif sort_by == "modified":
295
+ filtered_matches.sort(key=lambda m: m.modified, reverse=reverse)
296
+ else: # path
297
+ filtered_matches.sort(key=lambda m: m.path, reverse=reverse)
298
+
299
+ # Paginate
300
+ total_results = len(filtered_matches)
301
+ start_idx = (page - 1) * page_size
302
+ end_idx = start_idx + page_size
303
+ page_results = filtered_matches[start_idx:end_idx]
304
+
305
+ # Format results
306
+ formatted_results = [match.to_dict() for match in page_results]
307
+
308
+ # Statistics
309
+ stats = {
310
+ "total_found": total_results,
311
+ "search_time_ms": int((time.time() - start_time) * 1000),
312
+ "search_method": "ffind" if FFIND_AVAILABLE and not in_content else "python",
313
+ "root_path": str(root_path),
314
+ "filters_applied": {
315
+ "pattern": pattern,
316
+ "type": type,
317
+ "size": {"min": min_size, "max": max_size} if min_size or max_size else None,
318
+ "modified": {"after": modified_after, "before": modified_before} if modified_after or modified_before else None,
319
+ "max_depth": max_depth,
320
+ "gitignore": respect_gitignore
321
+ }
322
+ }
323
+
324
+ return MCPResourceDocument(data={
325
+ "results": formatted_results,
326
+ "pagination": {
327
+ "page": page,
328
+ "page_size": page_size,
329
+ "total_results": total_results,
330
+ "total_pages": (total_results + page_size - 1) // page_size,
331
+ "has_next": end_idx < total_results,
332
+ "has_prev": page > 1
333
+ },
334
+ "statistics": stats
335
+ })
336
+
337
+ async def call(self, **kwargs) -> str:
338
+ """Tool interface for MCP - converts result to JSON string."""
339
+ result = await self.run(**kwargs)
340
+ return result.to_json_string()
341
+
342
+ def register(self, mcp_server) -> None:
343
+ """Register tool with MCP server."""
344
+ from mcp.server import FastMCP
345
+
346
+ @mcp_server.tool(name=self.name, description=self.description)
347
+ async def find_handler(
348
+ pattern: str,
349
+ path: str = ".",
350
+ type: Optional[str] = None,
351
+ max_results: int = 100,
352
+ max_depth: Optional[int] = None,
353
+ case_sensitive: bool = False,
354
+ regex: bool = False,
355
+ fuzzy: bool = False,
356
+ min_size: Optional[str] = None,
357
+ max_size: Optional[str] = None,
358
+ modified_after: Optional[str] = None,
359
+ modified_before: Optional[str] = None,
360
+ follow_symlinks: bool = True,
361
+ respect_gitignore: bool = True,
362
+ sort_by: str = "name",
363
+ reverse: bool = False,
364
+ page_size: int = 50,
365
+ page: int = 1,
366
+ ) -> str:
367
+ """Execute file finding."""
368
+ return await self.call(
369
+ pattern=pattern,
370
+ path=path,
371
+ type=type,
372
+ max_results=max_results,
373
+ max_depth=max_depth,
374
+ case_sensitive=case_sensitive,
375
+ regex=regex,
376
+ fuzzy=fuzzy,
377
+ min_size=min_size,
378
+ max_size=max_size,
379
+ modified_after=modified_after,
380
+ modified_before=modified_before,
381
+ follow_symlinks=follow_symlinks,
382
+ respect_gitignore=respect_gitignore,
383
+ sort_by=sort_by,
384
+ reverse=reverse,
385
+ page_size=page_size,
386
+ page=page,
387
+ )
388
+
389
+ async def _find_with_ffind(self,
390
+ pattern: str,
391
+ root: Path,
392
+ file_type: Optional[str],
393
+ case_sensitive: bool,
394
+ regex: bool,
395
+ max_depth: Optional[int],
396
+ follow_symlinks: bool,
397
+ ignore_patterns: Set[str]) -> List[FileMatch]:
398
+ """Use ffind for fast file discovery."""
399
+ matches = []
400
+
401
+ # Configure ffind
402
+ ffind_args = {
403
+ 'path': str(root),
404
+ 'pattern': pattern,
405
+ 'regex': regex,
406
+ 'case_sensitive': case_sensitive,
407
+ 'follow_symlinks': follow_symlinks,
408
+ }
409
+
410
+ if max_depth:
411
+ ffind_args['max_depth'] = max_depth
412
+
413
+ try:
414
+ # Run ffind
415
+ results = ffind.find(**ffind_args)
416
+
417
+ for result in results:
418
+ path = result['path']
419
+
420
+ # Check ignore patterns
421
+ if self._should_ignore(path, ignore_patterns):
422
+ continue
423
+
424
+ # Get file info
425
+ try:
426
+ stat = os.stat(path)
427
+ is_dir = os.path.isdir(path)
428
+
429
+ # Apply type filter
430
+ if file_type == "file" and is_dir:
431
+ continue
432
+ if file_type == "dir" and not is_dir:
433
+ continue
434
+
435
+ match = FileMatch(
436
+ path=path,
437
+ name=os.path.basename(path),
438
+ size=stat.st_size,
439
+ modified=stat.st_mtime,
440
+ is_dir=is_dir,
441
+ extension=Path(path).suffix,
442
+ depth=len(Path(path).relative_to(root).parts)
443
+ )
444
+ matches.append(match)
445
+
446
+ except OSError:
447
+ continue
448
+
449
+ except Exception as e:
450
+ # Fall back to Python implementation
451
+ return await self._find_with_python(
452
+ pattern, root, file_type, case_sensitive, regex, False,
453
+ False, max_depth, follow_symlinks, respect_gitignore, ignore_patterns
454
+ )
455
+
456
+ return matches
457
+
458
+ async def _find_with_python(self,
459
+ pattern: str,
460
+ root: Path,
461
+ file_type: Optional[str],
462
+ case_sensitive: bool,
463
+ regex: bool,
464
+ fuzzy: bool,
465
+ in_content: bool,
466
+ max_depth: Optional[int],
467
+ follow_symlinks: bool,
468
+ respect_gitignore: bool,
469
+ ignore_patterns: Set[str]) -> List[FileMatch]:
470
+ """Python implementation of file finding."""
471
+ matches = []
472
+
473
+ import re
474
+ from difflib import SequenceMatcher
475
+
476
+ # Prepare pattern matcher
477
+ if regex:
478
+ flags = 0 if case_sensitive else re.IGNORECASE
479
+ try:
480
+ pattern_re = re.compile(pattern, flags)
481
+ matcher = lambda name: pattern_re.search(name) is not None
482
+ except re.error:
483
+ matcher = lambda name: pattern in name
484
+ elif fuzzy:
485
+ pattern_lower = pattern.lower() if not case_sensitive else pattern
486
+ matcher = lambda name: SequenceMatcher(None, pattern_lower,
487
+ name.lower() if not case_sensitive else name).ratio() > 0.6
488
+ else:
489
+ # Glob pattern
490
+ if not case_sensitive:
491
+ pattern = pattern.lower()
492
+ matcher = lambda name: fnmatch.fnmatch(name.lower(), pattern)
493
+ else:
494
+ matcher = lambda name: fnmatch.fnmatch(name, pattern)
495
+
496
+ # Walk directory tree
497
+ for dirpath, dirnames, filenames in os.walk(str(root), followlinks=follow_symlinks):
498
+ # Check depth
499
+ if max_depth is not None:
500
+ depth = len(Path(dirpath).relative_to(root).parts)
501
+ if depth > max_depth:
502
+ dirnames.clear() # Don't recurse deeper
503
+ continue
504
+
505
+ # Filter directories to skip
506
+ if respect_gitignore:
507
+ dirnames[:] = [d for d in dirnames if not self._should_ignore(os.path.join(dirpath, d), ignore_patterns)]
508
+
509
+ # Check directories
510
+ if file_type != "file":
511
+ for dirname in dirnames:
512
+ if matcher(dirname):
513
+ full_path = os.path.join(dirpath, dirname)
514
+ if not self._should_ignore(full_path, ignore_patterns):
515
+ try:
516
+ stat = os.stat(full_path)
517
+ match = FileMatch(
518
+ path=full_path,
519
+ name=dirname,
520
+ size=0, # Directories don't have size
521
+ modified=stat.st_mtime,
522
+ is_dir=True,
523
+ extension="",
524
+ depth=len(Path(full_path).relative_to(root).parts)
525
+ )
526
+ matches.append(match)
527
+ except OSError:
528
+ continue
529
+
530
+ # Check files
531
+ if file_type != "dir":
532
+ for filename in filenames:
533
+ full_path = os.path.join(dirpath, filename)
534
+
535
+ if self._should_ignore(full_path, ignore_patterns):
536
+ continue
537
+
538
+ # Match against filename
539
+ if matcher(filename):
540
+ match_found = True
541
+ elif in_content:
542
+ # Search in file content
543
+ match_found = await self._search_in_file(full_path, pattern, case_sensitive)
544
+ else:
545
+ match_found = False
546
+
547
+ if match_found:
548
+ try:
549
+ stat = os.stat(full_path)
550
+ match = FileMatch(
551
+ path=full_path,
552
+ name=filename,
553
+ size=stat.st_size,
554
+ modified=stat.st_mtime,
555
+ is_dir=False,
556
+ extension=Path(filename).suffix,
557
+ depth=len(Path(full_path).relative_to(root).parts)
558
+ )
559
+ matches.append(match)
560
+ except OSError:
561
+ continue
562
+
563
+ return matches
564
+
565
+ async def _search_in_file(self, file_path: str, pattern: str, case_sensitive: bool) -> bool:
566
+ """Search for pattern in file content."""
567
+ try:
568
+ with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
569
+ content = f.read()
570
+ if not case_sensitive:
571
+ return pattern.lower() in content.lower()
572
+ else:
573
+ return pattern in content
574
+ except:
575
+ return False
576
+
577
+
578
# Tool registration
def create_find_tool():
    """Factory used by the tool registry to construct a FindTool instance."""
    return FindTool()