tree-sitter-analyzer 1.7.1-py3-none-any.whl → 1.7.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of tree-sitter-analyzer might be problematic.

tree_sitter_analyzer/__init__.py
@@ -11,7 +11,7 @@ Architecture:
  - Data Models: Generic and language-specific code element representations
  """

- __version__ = "1.7.1"
+ __version__ = "1.7.2"
  __author__ = "aisheng.yu"
  __email__ = "aimasteracc@gmail.com"


tree_sitter_analyzer/mcp/tools/find_and_grep_tool.py
@@ -13,6 +13,7 @@ import time
  from typing import Any

  from ..utils.error_handler import handle_mcp_errors
+ from ..utils.file_output_manager import FileOutputManager
  from ..utils.gitignore_detector import get_default_detector
  from . import fd_rg_utils
  from .base_tool import BaseMCPTool
@@ -23,6 +24,11 @@ logger = logging.getLogger(__name__)
  class FindAndGrepTool(BaseMCPTool):
      """MCP tool that composes fd and ripgrep with safety limits and metadata."""

+     def __init__(self, project_root: str | None = None) -> None:
+         """Initialize the find and grep tool."""
+         super().__init__(project_root)
+         self.file_output_manager = FileOutputManager(project_root)
+
      def get_tool_definition(self) -> dict[str, Any]:
          return {
              "name": "find_and_grep",
@@ -191,6 +197,15 @@ class FindAndGrepTool(BaseMCPTool):
                      "default": False,
                      "description": "Return only the total match count as a number. Most token-efficient option for count queries. Takes priority over all other formats",
                  },
+                 "output_file": {
+                     "type": "string",
+                     "description": "Optional filename to save output to file (extension auto-detected based on content)",
+                 },
+                 "suppress_output": {
+                     "type": "boolean",
+                     "description": "When true and output_file is specified, suppress detailed output in response to save tokens",
+                     "default": False,
+                 },
              },
              "required": ["roots", "query"],
              "additionalProperties": False,
@@ -373,11 +388,7 @@ class FindAndGrepTool(BaseMCPTool):
              context_before=arguments.get("context_before"),
              context_after=arguments.get("context_after"),
              encoding=arguments.get("encoding"),
-             max_count=fd_rg_utils.clamp_int(
-                 arguments.get("max_count"),
-                 fd_rg_utils.DEFAULT_RESULTS_LIMIT,
-                 fd_rg_utils.MAX_RESULTS_HARD_CAP,
-             ),
+             max_count=arguments.get("max_count"),
              timeout_ms=arguments.get("timeout_ms"),
              roots=rg_roots,
              files_from=None,
@@ -427,9 +438,18 @@ class FindAndGrepTool(BaseMCPTool):
          else:
              # Parse full match details
              matches = fd_rg_utils.parse_rg_json_lines_to_matches(rg_out)
-             truncated_rg = len(matches) >= fd_rg_utils.MAX_RESULTS_HARD_CAP
-             if truncated_rg:
-                 matches = matches[: fd_rg_utils.MAX_RESULTS_HARD_CAP]
+
+             # Apply user-specified max_count limit if provided
+             # Note: ripgrep's -m option limits matches per file, not total matches
+             # So we need to apply the total limit here in post-processing
+             user_max_count = arguments.get("max_count")
+             if user_max_count is not None and len(matches) > user_max_count:
+                 matches = matches[:user_max_count]
+                 truncated_rg = True
+             else:
+                 truncated_rg = len(matches) >= fd_rg_utils.MAX_RESULTS_HARD_CAP
+                 if truncated_rg:
+                     matches = matches[: fd_rg_utils.MAX_RESULTS_HARD_CAP]

          # Apply path optimization if requested
          optimize_paths = arguments.get("optimize_paths", False)
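
The hunk above changes truncation semantics: ripgrep's `-m`/`--max-count` caps matches per file, so a user-supplied `max_count` is now enforced as a total across all files in post-processing. A minimal standalone sketch of that logic, with `HARD_CAP` standing in for `fd_rg_utils.MAX_RESULTS_HARD_CAP`:

```python
HARD_CAP = 2000  # stand-in value; the real constant lives in fd_rg_utils

def truncate_total(matches: list[dict], user_max_count: int | None) -> tuple[list[dict], bool]:
    """Apply a total-match limit, mirroring the post-processing in the hunk above."""
    if user_max_count is not None and len(matches) > user_max_count:
        return matches[:user_max_count], True
    truncated = len(matches) >= HARD_CAP
    return (matches[:HARD_CAP], True) if truncated else (matches, False)

# Ten matches spread over two files: a per-file `rg -m 5` would keep all ten,
# while a total limit of 5 keeps only the first five.
matches = [{"file": f"f{i % 2}.py", "line": i} for i in range(10)]
kept, truncated = truncate_total(matches, 5)
assert len(kept) == 5 and truncated
```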
@@ -452,12 +472,55 @@ class FindAndGrepTool(BaseMCPTool):
                  "fd_elapsed_ms": fd_elapsed_ms,
                  "rg_elapsed_ms": rg_elapsed_ms,
              }
+
+             # Handle output suppression and file output for grouped results
+             output_file = arguments.get("output_file")
+             suppress_output = arguments.get("suppress_output", False)
+
+             # Handle file output if requested
+             if output_file:
+                 try:
+                     # Save full result to file
+                     import json
+                     json_content = json.dumps(grouped_result, indent=2, ensure_ascii=False)
+                     file_path = self.file_output_manager.save_to_file(
+                         content=json_content,
+                         base_name=output_file
+                     )
+
+                     # If suppress_output is True, return minimal response
+                     if suppress_output:
+                         minimal_result = {
+                             "success": grouped_result.get("success", True),
+                             "count": grouped_result.get("count", 0),
+                             "output_file": output_file,
+                             "file_saved": f"Results saved to {file_path}"
+                         }
+                         return minimal_result
+                     else:
+                         # Include file info in full response
+                         grouped_result["output_file"] = output_file
+                         grouped_result["file_saved"] = f"Results saved to {file_path}"
+                 except Exception as e:
+                     logger.error(f"Failed to save output to file: {e}")
+                     grouped_result["file_save_error"] = str(e)
+                     grouped_result["file_saved"] = False
+             elif suppress_output:
+                 # If suppress_output is True but no output_file, remove detailed results
+                 minimal_result = {
+                     "success": grouped_result.get("success", True),
+                     "count": grouped_result.get("count", 0),
+                     "summary": grouped_result.get("summary", {}),
+                     "meta": grouped_result.get("meta", {})
+                 }
+                 return minimal_result
+
              return grouped_result

          # Check if summary_only mode is requested
          if arguments.get("summary_only", False):
              summary = fd_rg_utils.summarize_search_results(matches)
-             return {
+             result = {
                  "success": True,
                  "summary_only": True,
                  "summary": summary,
@@ -468,10 +531,53 @@ class FindAndGrepTool(BaseMCPTool):
                      "rg_elapsed_ms": rg_elapsed_ms,
                  },
              }
+
+             # Handle output suppression and file output for summary results
+             output_file = arguments.get("output_file")
+             suppress_output = arguments.get("suppress_output", False)
+
+             # Handle file output if requested
+             if output_file:
+                 try:
+                     # Save full result to file
+                     import json
+                     json_content = json.dumps(result, indent=2, ensure_ascii=False)
+                     file_path = self.file_output_manager.save_to_file(
+                         content=json_content,
+                         base_name=output_file
+                     )
+
+                     # If suppress_output is True, return minimal response
+                     if suppress_output:
+                         minimal_result = {
+                             "success": result.get("success", True),
+                             "count": len(matches),
+                             "output_file": output_file,
+                             "file_saved": f"Results saved to {file_path}"
+                         }
+                         return minimal_result
+                     else:
+                         # Include file info in full response
+                         result["output_file"] = output_file
+                         result["file_saved"] = f"Results saved to {file_path}"
+                 except Exception as e:
+                     logger.error(f"Failed to save output to file: {e}")
+                     result["file_save_error"] = str(e)
+                     result["file_saved"] = False
+             elif suppress_output:
+                 # If suppress_output is True but no output_file, remove detailed results
+                 minimal_result = {
+                     "success": result.get("success", True),
+                     "count": len(matches),
+                     "summary": result.get("summary", {}),
+                     "meta": result.get("meta", {})
+                 }
+                 return minimal_result
+
+             return result
          else:
-             return {
+             result = {
                  "success": True,
-                 "results": matches,
                  "count": len(matches),
                  "meta": {
                      "searched_file_count": searched_file_count,
@@ -480,3 +586,70 @@ class FindAndGrepTool(BaseMCPTool):
                      "rg_elapsed_ms": rg_elapsed_ms,
                  },
              }
+
+             # Handle output suppression and file output
+             output_file = arguments.get("output_file")
+             suppress_output = arguments.get("suppress_output", False)
+
+             # Add results to response unless suppressed
+             if not suppress_output or not output_file:
+                 result["results"] = matches
+
+             # Handle file output if requested
+             if output_file:
+                 try:
+                     # Create detailed output for file
+                     file_content = {
+                         "success": True,
+                         "results": matches,
+                         "count": len(matches),
+                         "files": fd_rg_utils.group_matches_by_file(matches)["files"] if matches else [],
+                         "summary": fd_rg_utils.summarize_search_results(matches),
+                         "meta": result["meta"]
+                     }
+
+                     # Convert to JSON for file output
+                     # Save full result to file using FileOutputManager
+                     import json
+                     json_content = json.dumps(file_content, indent=2, ensure_ascii=False)
+                     file_path = self.file_output_manager.save_to_file(
+                         content=json_content,
+                         base_name=output_file
+                     )
+
+                     # Check if suppress_output is enabled
+                     suppress_output = arguments.get("suppress_output", False)
+                     if suppress_output:
+                         # Return minimal response to save tokens
+                         minimal_result = {
+                             "success": result.get("success", True),
+                             "count": result.get("count", 0),
+                             "output_file": output_file,
+                             "file_saved": f"Results saved to {file_path}"
+                         }
+                         return minimal_result
+                     else:
+                         # Include file info in full response
+                         result["output_file"] = output_file
+                         result["file_saved"] = f"Results saved to {file_path}"
+
+                     logger.info(f"Search results saved to: {file_path}")
+
+                 except Exception as e:
+                     logger.error(f"Failed to save output to file: {e}")
+                     result["file_save_error"] = str(e)
+                     result["file_saved"] = False
+             else:
+                 # Handle suppress_output without file output
+                 suppress_output = arguments.get("suppress_output", False)
+                 if suppress_output:
+                     # Return minimal response without detailed match results
+                     minimal_result = {
+                         "success": result.get("success", True),
+                         "count": result.get("count", 0),
+                         "summary": result.get("summary", {}),
+                         "meta": result.get("meta", {})
+                     }
+                     return minimal_result
+
+             return result
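
Taken together, the new tail of the find_and_grep handler picks one of three response shapes: the full result, the full result annotated with file info, or a minimal stub. A condensed sketch of that decision logic; `save_full_result` is a hypothetical stand-in for the JSON dump plus `FileOutputManager.save_to_file` steps shown above:

```python
import json
from pathlib import Path

def save_full_result(result: dict, base_name: str) -> str:
    """Hypothetical stand-in for FileOutputManager.save_to_file with JSON content."""
    path = Path(f"{base_name}.json")
    path.write_text(json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8")
    return str(path)

def shape_response(result: dict, output_file: str | None, suppress_output: bool) -> dict:
    """Condensed mirror of the three branches in the hunk above."""
    if output_file:
        path = save_full_result(result, output_file)
        if suppress_output:
            # Minimal response: metadata only; the details live in the file
            return {"success": True, "count": result.get("count", 0),
                    "output_file": output_file, "file_saved": f"Results saved to {path}"}
        result.update(output_file=output_file, file_saved=f"Results saved to {path}")
        return result
    if suppress_output:
        # No file requested: still drop the bulky match list
        return {k: result.get(k) for k in ("success", "count", "summary", "meta")}
    return result
```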

tree_sitter_analyzer/mcp/tools/list_files_tool.py
@@ -13,6 +13,7 @@ from pathlib import Path
  from typing import Any

  from ..utils.error_handler import handle_mcp_errors
+ from ..utils.file_output_manager import FileOutputManager
  from ..utils.gitignore_detector import get_default_detector
  from . import fd_rg_utils
  from .base_tool import BaseMCPTool
@@ -110,6 +111,15 @@ class ListFilesTool(BaseMCPTool):
                      "default": False,
                      "description": "Return only the total count of matching files instead of file details. Useful for quick statistics",
                  },
+                 "output_file": {
+                     "type": "string",
+                     "description": "Optional filename to save output to file (extension auto-detected based on content)"
+                 },
+                 "suppress_output": {
+                     "type": "boolean",
+                     "description": "When true and output_file is specified, suppress detailed output in response to save tokens",
+                     "default": False
+                 },
              },
              "required": ["roots"],
              "additionalProperties": False,
@@ -243,7 +253,7 @@ class ListFilesTool(BaseMCPTool):
              else:
                  truncated = False

-             return {
+             result = {
                  "success": True,
                  "count_only": True,
                  "total_count": total_count,
@@ -251,6 +261,52 @@ class ListFilesTool(BaseMCPTool):
                  "elapsed_ms": elapsed_ms,
              }

+             # Handle file output for count_only mode
+             output_file = arguments.get("output_file")
+             suppress_output = arguments.get("suppress_output", False)
+
+             if output_file:
+                 file_manager = FileOutputManager(self.project_root)
+                 file_content = {
+                     "count_only": True,
+                     "total_count": total_count,
+                     "truncated": truncated,
+                     "elapsed_ms": elapsed_ms,
+                     "query_info": {
+                         "roots": arguments.get("roots", []),
+                         "pattern": arguments.get("pattern"),
+                         "glob": arguments.get("glob", False),
+                         "types": arguments.get("types"),
+                         "extensions": arguments.get("extensions"),
+                         "exclude": arguments.get("exclude"),
+                         "limit": limit,
+                     }
+                 }
+
+                 try:
+                     import json
+                     json_content = json.dumps(file_content, indent=2, ensure_ascii=False)
+                     saved_path = file_manager.save_to_file(
+                         content=json_content,
+                         base_name=output_file
+                     )
+                     result["output_file"] = saved_path
+
+                     if suppress_output:
+                         # Return minimal response to save tokens
+                         return {
+                             "success": True,
+                             "count_only": True,
+                             "total_count": total_count,
+                             "output_file": saved_path,
+                             "message": f"Count results saved to {saved_path}"
+                         }
+                 except Exception as e:
+                     logger.warning(f"Failed to save output file: {e}")
+                     result["output_file_error"] = str(e)
+
+             return result
+
          # Truncate defensively even if fd didn't
          truncated = False
          if len(lines) > fd_rg_utils.MAX_RESULTS_HARD_CAP:
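
Assuming the count_only handler above runs with `output_file` set, the JSON document written to disk would look roughly like this; the values are illustrative, the keys follow the `file_content` dict in the hunk:

```python
# Illustrative shape of the saved count_only document (values made up).
count_only_file_content = {
    "count_only": True,
    "total_count": 1234,
    "truncated": False,
    "elapsed_ms": 87,
    "query_info": {
        "roots": ["src"],
        "pattern": "*.py",
        "glob": True,
        "types": None,
        "extensions": ["py"],
        "exclude": None,
        "limit": None,
    },
}
```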
@@ -283,10 +339,64 @@ class ListFilesTool(BaseMCPTool):
              except (OSError, ValueError): # nosec B112
                  continue

-         return {
+         result = {
              "success": True,
              "count": len(results),
              "truncated": truncated,
              "elapsed_ms": elapsed_ms,
              "results": results,
          }
+
+         # Handle file output for detailed results
+         output_file = arguments.get("output_file")
+         suppress_output = arguments.get("suppress_output", False)
+
+         if output_file:
+             file_manager = FileOutputManager(self.project_root)
+             file_content = {
+                 "count": len(results),
+                 "truncated": truncated,
+                 "elapsed_ms": elapsed_ms,
+                 "results": results,
+                 "query_info": {
+                     "roots": arguments.get("roots", []),
+                     "pattern": arguments.get("pattern"),
+                     "glob": arguments.get("glob", False),
+                     "types": arguments.get("types"),
+                     "extensions": arguments.get("extensions"),
+                     "exclude": arguments.get("exclude"),
+                     "depth": arguments.get("depth"),
+                     "follow_symlinks": arguments.get("follow_symlinks", False),
+                     "hidden": arguments.get("hidden", False),
+                     "no_ignore": no_ignore,
+                     "size": arguments.get("size"),
+                     "changed_within": arguments.get("changed_within"),
+                     "changed_before": arguments.get("changed_before"),
+                     "full_path_match": arguments.get("full_path_match", False),
+                     "absolute": arguments.get("absolute", True),
+                     "limit": limit,
+                 }
+             }
+
+             try:
+                 import json
+                 json_content = json.dumps(file_content, indent=2, ensure_ascii=False)
+                 saved_path = file_manager.save_to_file(
+                     content=json_content,
+                     base_name=output_file
+                 )
+                 result["output_file"] = saved_path
+
+                 if suppress_output:
+                     # Return minimal response to save tokens
+                     return {
+                         "success": True,
+                         "count": len(results),
+                         "output_file": saved_path,
+                         "message": f"File list results saved to {saved_path}"
+                     }
+             except Exception as e:
+                 logger.warning(f"Failed to save output file: {e}")
+                 result["output_file_error"] = str(e)
+
+         return result
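
From its call sites in this diff, `FileOutputManager.save_to_file(content=..., base_name=...)` takes pre-serialized text plus a base name, auto-detects an extension, and returns the path it wrote. A hedged usage sketch; the module path is inferred from the relative imports above, and the constructor argument from `FileOutputManager(self.project_root)`:

```python
from tree_sitter_analyzer.mcp.utils.file_output_manager import FileOutputManager

# Inferred usage: construct with the project root, pass serialized JSON and a
# base name; the manager picks the file extension based on the content.
manager = FileOutputManager("/path/to/project")
saved_path = manager.save_to_file(
    content='{"count": 3}',
    base_name="file_list",
)
print(f"File list results saved to {saved_path}")
```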

tree_sitter_analyzer/mcp/tools/search_content_tool.py
@@ -13,6 +13,7 @@ from pathlib import Path
  from typing import Any

  from ..utils.error_handler import handle_mcp_errors
+ from ..utils.file_output_manager import FileOutputManager
  from ..utils.gitignore_detector import get_default_detector
  from ..utils.search_cache import get_default_cache
  from . import fd_rg_utils
@@ -36,6 +37,7 @@ class SearchContentTool(BaseMCPTool):
          """
          super().__init__(project_root)
          self.cache = get_default_cache() if enable_cache else None
+         self.file_output_manager = FileOutputManager(project_root)

      def get_tool_definition(self) -> dict[str, Any]:
          return {
@@ -153,6 +155,15 @@ class SearchContentTool(BaseMCPTool):
                      "default": False,
                      "description": "Return only the total match count as a number. Most token-efficient option for count queries. Takes priority over all other formats",
                  },
+                 "output_file": {
+                     "type": "string",
+                     "description": "Optional filename to save output to file (extension auto-detected based on content)",
+                 },
+                 "suppress_output": {
+                     "type": "boolean",
+                     "description": "When true and output_file is specified, suppress detailed output in response to save tokens",
+                     "default": False,
+                 },
              },
              "required": ["query"],
              "anyOf": [
@@ -312,12 +323,18 @@ class SearchContentTool(BaseMCPTool):
              cached_result["cache_hit"] = True
              return cached_result

-         # Clamp counts to safety limits
-         max_count = fd_rg_utils.clamp_int(
-             arguments.get("max_count"),
-             fd_rg_utils.DEFAULT_RESULTS_LIMIT,
-             fd_rg_utils.DEFAULT_RESULTS_LIMIT,
-         )
+         # Handle max_count parameter properly
+         # If user specifies max_count, use it directly (with reasonable upper limit)
+         # If not specified, use None to let ripgrep return all matches (subject to hard cap later)
+         max_count = arguments.get("max_count")
+         if max_count is not None:
+             # Clamp user-specified max_count to reasonable limits
+             # Use 1 as minimum default, but respect user's small values
+             max_count = fd_rg_utils.clamp_int(
+                 max_count,
+                 1,  # Minimum default value
+                 fd_rg_utils.DEFAULT_RESULTS_LIMIT,  # Upper limit for safety
+             )
          timeout_ms = arguments.get("timeout_ms")

          # Note: --files-from is not supported in this ripgrep version
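
`fd_rg_utils.clamp_int` itself is not shown in this diff, so the following is a hypothetical reconstruction from its two call sites: the removed `clamp_int(arguments.get("max_count"), DEFAULT_RESULTS_LIMIT, DEFAULT_RESULTS_LIMIT)` and the new `clamp_int(max_count, 1, fd_rg_utils.DEFAULT_RESULTS_LIMIT)` both make sense if the second argument substitutes for a missing value and the third caps the result:

```python
def clamp_int(value: int | None, default: int, upper: int) -> int:
    """Hypothetical reconstruction of fd_rg_utils.clamp_int from its call sites:
    fall back to `default` for missing or non-positive values, cap at `upper`."""
    if value is None or value <= 0:
        value = default
    return min(value, upper)

# New behavior: a small user-supplied max_count such as 5 is respected,
# whereas the old call always produced DEFAULT_RESULTS_LIMIT for missing values.
assert clamp_int(5, 1, 1000) == 5
assert clamp_int(None, 1, 1000) == 1
```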
@@ -461,9 +478,18 @@ class SearchContentTool(BaseMCPTool):

          # Handle normal mode
          matches = fd_rg_utils.parse_rg_json_lines_to_matches(out)
-         truncated = len(matches) >= fd_rg_utils.MAX_RESULTS_HARD_CAP
-         if truncated:
-             matches = matches[: fd_rg_utils.MAX_RESULTS_HARD_CAP]
+
+         # Apply user-specified max_count limit if provided
+         # Note: ripgrep's -m option limits matches per file, not total matches
+         # So we need to apply the total limit here in post-processing
+         user_max_count = arguments.get("max_count")
+         if user_max_count is not None and len(matches) > user_max_count:
+             matches = matches[:user_max_count]
+             truncated = True
+         else:
+             truncated = len(matches) >= fd_rg_utils.MAX_RESULTS_HARD_CAP
+             if truncated:
+                 matches = matches[: fd_rg_utils.MAX_RESULTS_HARD_CAP]

          # Apply path optimization if requested
          optimize_paths = arguments.get("optimize_paths", False)
@@ -475,6 +501,54 @@ class SearchContentTool(BaseMCPTool):
          if group_by_file and matches:
              result = fd_rg_utils.group_matches_by_file(matches)

+             # Handle output suppression and file output for grouped results
+             output_file = arguments.get("output_file")
+             suppress_output = arguments.get("suppress_output", False)
+
+             # Handle file output if requested
+             if output_file:
+                 try:
+                     # Save full result to file
+                     import json
+                     json_content = json.dumps(result, indent=2, ensure_ascii=False)
+                     file_path = self.file_output_manager.save_to_file(
+                         content=json_content,
+                         base_name=output_file
+                     )
+
+                     # If suppress_output is True, return minimal response
+                     if suppress_output:
+                         minimal_result = {
+                             "success": result.get("success", True),
+                             "count": result.get("count", 0),
+                             "output_file": output_file,
+                             "file_saved": f"Results saved to {file_path}"
+                         }
+                         # Cache the full result, not the minimal one
+                         if self.cache and cache_key:
+                             self.cache.set(cache_key, result)
+                         return minimal_result
+                     else:
+                         # Include file info in full response
+                         result["output_file"] = output_file
+                         result["file_saved"] = f"Results saved to {file_path}"
+                 except Exception as e:
+                     logger.error(f"Failed to save output to file: {e}")
+                     result["file_save_error"] = str(e)
+                     result["file_saved"] = False
+             elif suppress_output:
+                 # If suppress_output is True but no output_file, remove detailed results
+                 minimal_result = {
+                     "success": result.get("success", True),
+                     "count": result.get("count", 0),
+                     "summary": result.get("summary", {}),
+                     "meta": result.get("meta", {})
+                 }
+                 # Cache the full result, not the minimal one
+                 if self.cache and cache_key:
+                     self.cache.set(cache_key, result)
+                 return minimal_result
+
              # Cache the result
              if self.cache and cache_key:
                  self.cache.set(cache_key, result)
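
One subtlety in the hunk above: whenever a minimal response is returned, the cache is populated with the full result first, so a later identical query can be served complete data via the `cache_hit` path earlier in this file. A reduced sketch of that ordering; the `cache.set` call mirrors `self.cache.set(cache_key, result)` from the diff, the rest is illustrative:

```python
def respond(result: dict, cache, cache_key: str | None, minimal: dict | None) -> dict:
    """Cache the full payload before deciding what to send back to the client."""
    if cache and cache_key:
        cache.set(cache_key, result)  # the full result goes into the cache
    # The client gets the minimal stub when suppression applies; a repeat query
    # hits the cache and receives the full result, flagged with cache_hit=True.
    return minimal if minimal is not None else result
```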
@@ -492,6 +566,54 @@ class SearchContentTool(BaseMCPTool):
                  "summary": summary,
              }

+             # Handle output suppression and file output for summary results
+             output_file = arguments.get("output_file")
+             suppress_output = arguments.get("suppress_output", False)
+
+             # Handle file output if requested
+             if output_file:
+                 try:
+                     # Save full result to file
+                     import json
+                     json_content = json.dumps(result, indent=2, ensure_ascii=False)
+                     file_path = self.file_output_manager.save_to_file(
+                         content=json_content,
+                         base_name=output_file
+                     )
+
+                     # If suppress_output is True, return minimal response
+                     if suppress_output:
+                         minimal_result = {
+                             "success": result.get("success", True),
+                             "count": result.get("count", 0),
+                             "output_file": output_file,
+                             "file_saved": f"Results saved to {file_path}"
+                         }
+                         # Cache the full result, not the minimal one
+                         if self.cache and cache_key:
+                             self.cache.set(cache_key, result)
+                         return minimal_result
+                     else:
+                         # Include file info in full response
+                         result["output_file"] = output_file
+                         result["file_saved"] = f"Results saved to {file_path}"
+                 except Exception as e:
+                     logger.error(f"Failed to save output to file: {e}")
+                     result["file_save_error"] = str(e)
+                     result["file_saved"] = False
+             elif suppress_output:
+                 # If suppress_output is True but no output_file, remove detailed results
+                 minimal_result = {
+                     "success": result.get("success", True),
+                     "count": result.get("count", 0),
+                     "summary": result.get("summary", {}),
+                     "elapsed_ms": result.get("elapsed_ms", 0)
+                 }
+                 # Cache the full result, not the minimal one
+                 if self.cache and cache_key:
+                     self.cache.set(cache_key, result)
+                 return minimal_result
+
              # Cache the result
              if self.cache and cache_key:
                  self.cache.set(cache_key, result)
@@ -503,9 +625,87 @@ class SearchContentTool(BaseMCPTool):
              "count": len(matches),
              "truncated": truncated,
              "elapsed_ms": elapsed_ms,
-             "results": matches,
          }

+         # Handle output suppression and file output
+         output_file = arguments.get("output_file")
+         suppress_output = arguments.get("suppress_output", False)
+
+         # Always add results to the base result for file saving
+         result["results"] = matches
+
+         # Handle file output if requested
+         if output_file:
+             try:
+                 # Create detailed output for file
+                 file_content = {
+                     "success": True,
+                     "count": len(matches),
+                     "truncated": truncated,
+                     "elapsed_ms": elapsed_ms,
+                     "results": matches,
+                     "summary": fd_rg_utils.summarize_search_results(matches),
+                     "grouped_by_file": fd_rg_utils.group_matches_by_file(matches)["files"] if matches else []
+                 }
+
+                 # Convert to JSON for file output
+                 import json
+                 json_content = json.dumps(file_content, indent=2, ensure_ascii=False)
+
+                 # Save to file
+                 saved_file_path = self.file_output_manager.save_to_file(
+                     content=json_content,
+                     base_name=output_file
+                 )
+
+                 result["output_file_path"] = saved_file_path
+                 result["file_saved"] = True
+
+                 logger.info(f"Search results saved to: {saved_file_path}")
+
+             except Exception as e:
+                 logger.error(f"Failed to save output to file: {e}")
+                 result["file_save_error"] = str(e)
+                 result["file_saved"] = False
+
+         # Handle file output and suppression
+         output_file = arguments.get("output_file")
+         suppress_output = arguments.get("suppress_output", False)
+
+         if output_file:
+             # Save full result to file
+             import json
+             json_content = json.dumps(result, indent=2, ensure_ascii=False)
+             file_path = self.file_output_manager.save_to_file(
+                 content=json_content,
+                 base_name=output_file
+             )
+
+             # If suppress_output is True, return minimal response
+             if suppress_output:
+                 minimal_result = {
+                     "success": result.get("success", True),
+                     "count": result.get("count", 0),
+                     "output_file": output_file,
+                     "file_saved": f"Results saved to {file_path}"
+                 }
+                 # Cache the full result, not the minimal one
+                 if self.cache and cache_key:
+                     self.cache.set(cache_key, result)
+                 return minimal_result
+             else:
+                 # Include file info in full response
+                 result["output_file"] = output_file
+                 result["file_saved"] = f"Results saved to {file_path}"
+         elif suppress_output:
+             # If suppress_output is True but no output_file, remove results from response
+             result_copy = result.copy()
+             result_copy.pop("results", None)
+             # Cache the full result, not the minimal one
+             if self.cache and cache_key:
+                 self.cache.set(cache_key, result)
+             return result_copy
+
          # Cache the result
          if self.cache and cache_key:
              self.cache.set(cache_key, result)

*.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tree-sitter-analyzer
- Version: 1.7.1
+ Version: 1.7.2
  Summary: Extensible multi-language code analyzer framework using Tree-sitter with dynamic plugin architecture
  Project-URL: Homepage, https://github.com/aimasteracc/tree-sitter-analyzer
  Project-URL: Documentation, https://github.com/aimasteracc/tree-sitter-analyzer#readme
@@ -165,11 +165,11 @@ Description-Content-Type: text/markdown

  [![Python Version](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://python.org)
  [![License](https://img.shields.io/badge/license-MIT-green.svg)](LICENSE)
- [![Tests](https://img.shields.io/badge/tests-2662%20passed-brightgreen.svg)](#quality-assurance)
- [![Coverage](https://img.shields.io/badge/coverage-79.16%25-green.svg)](#quality-assurance)
+ [![Tests](https://img.shields.io/badge/tests-2675%20passed-brightgreen.svg)](#quality-assurance)
+ [![Coverage](https://img.shields.io/badge/coverage-78.85%25-green.svg)](#quality-assurance)
  [![Quality](https://img.shields.io/badge/quality-enterprise%20grade-blue.svg)](#quality-assurance)
  [![PyPI](https://img.shields.io/pypi/v/tree-sitter-analyzer.svg)](https://pypi.org/project/tree-sitter-analyzer/)
- [![Version](https://img.shields.io/badge/version-1.7.1-blue.svg)](https://github.com/aimasteracc/tree-sitter-analyzer/releases)
+ [![Version](https://img.shields.io/badge/version-1.7.2-blue.svg)](https://github.com/aimasteracc/tree-sitter-analyzer/releases)
  [![GitHub Stars](https://img.shields.io/github/stars/aimasteracc/tree-sitter-analyzer.svg?style=social)](https://github.com/aimasteracc/tree-sitter-analyzer)

  ## 🚀 Enterprise-Grade Code Analysis Tool for the AI Era
@@ -218,8 +218,8 @@ Tree-sitter Analyzer is an enterprise-grade code analysis tool designed for the
  | **Go** | Basic Support | Basic syntax parsing |

  ### 🏆 Production Ready
- - **2,662 Tests** - 100% pass rate, enterprise-grade quality assurance
- - **79.16% Coverage** - Comprehensive test coverage
+ - **2,675 Tests** - 100% pass rate, enterprise-grade quality assurance
+ - **78.85% Coverage** - Comprehensive test coverage
  - **Cross-platform Support** - Compatible with Windows, macOS, Linux
  - **Continuous Maintenance** - Active development and community support

@@ -585,9 +585,19 @@ Tree-sitter Analyzer provides a rich set of MCP tools designed for AI assistants
  | **📁 Resource Access** | Code file resources | URI code file access | File content access via URI identification |
  | | Project statistics resources | Project statistics data access | Project analysis data and statistical information |

- ### 🆕 v1.7.0 New Feature: suppress_output Function
+ ### 🆕 v1.7.2 New Feature: File Output Optimization

- The newly added `suppress_output` parameter in the `analyze_code_structure` tool is a revolutionary token optimization feature:
+ The file output optimization newly added to the MCP search tools is a revolutionary token-saving feature:
+
+ - **🎯 File Output Optimization**: `find_and_grep`, `list_files`, and `search_content` tools now include `suppress_output` and `output_file` parameters
+ - **🔄 Automatic Format Detection**: Smart file format selection (JSON/Markdown) based on content type
+ - **💾 Massive Token Savings**: Response size reduced by up to 99% when saving large search results to files
+ - **📚 ROO Rules Documentation**: Added a comprehensive tree-sitter-analyzer MCP optimization usage guide
+ - **🔧 Backward Compatibility**: Optional feature that doesn't affect existing functionality
+
+ ### 🆕 v1.7.0 Feature: suppress_output Function
+
+ The `suppress_output` parameter in the `analyze_code_structure` tool:

  - **Problem solved**: When analysis results are too large, traditional methods return complete table data, consuming massive tokens
  - **Intelligent optimization**: When `suppress_output=true` and `output_file` specified, only basic metadata is returned
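
A hedged example of how the two parameters combine for `analyze_code_structure`; only `output_file` and `suppress_output` are confirmed by this README, the remaining argument names are assumptions for illustration:

```python
# Illustrative analyze_code_structure arguments (names other than
# output_file/suppress_output are hypothetical).
arguments = {
    "file_path": "BigService.java",          # assumed parameter name
    "output_file": "bigservice_structure",   # the full table is written here
    "suppress_output": True,                 # response carries only basic metadata
}
# Without suppress_output, the complete table data comes back in the response
# and consumes tokens; with it, the caller reads the saved file on demand.
```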
@@ -708,16 +718,18 @@ uv run python -m tree_sitter_analyzer --show-query-languages
  ## 8. 🏆 Quality Assurance

  ### 📊 Quality Metrics
- - **2,662 tests** - 100% pass rate ✅
- - **79.16% code coverage** - Comprehensive test suite
+ - **2,675 tests** - 100% pass rate ✅
+ - **78.85% code coverage** - Comprehensive test suite
  - **Zero test failures** - Production ready
  - **Cross-platform support** - Windows, macOS, Linux

- ### ⚡ Latest Quality Achievements (v1.7.0)
- - ✅ **Token saving feature** - New suppress_output parameter automatically suppresses table output when file output is specified, saving AI token consumption
- - ✅ **Intelligent output control** - Automatically optimizes response size when output_file is specified and suppress_output=true
- - ✅ **Enterprise-grade test coverage** - Comprehensive test suite including complete validation of suppress_output functionality
- - ✅ **MCP tool enhancement** - Complete MCP server tool set supporting advanced file search and content analysis
+ ### ⚡ Latest Quality Achievements (v1.7.2)
+ - ✅ **File output optimization** - MCP search tools now include `suppress_output` and `output_file` parameters for massive token savings
+ - ✅ **Intelligent format detection** - Automatic selection of optimal file formats (JSON/Markdown) for storage and reading optimization
+ - ✅ **ROO rules documentation** - Added a comprehensive tree-sitter-analyzer MCP optimization usage guide
+ - ✅ **Enhanced token management** - Response size reduced by up to 99% when outputting search results to files
+ - ✅ **Enterprise-grade test coverage** - Comprehensive test suite including complete validation of file output optimization features
+ - ✅ **Complete MCP tools** - Complete MCP server tool set supporting advanced file search and content analysis
  - ✅ **Cross-platform path compatibility** - Fixed differences between Windows short path names and macOS symbolic links
  - ✅ **GitFlow implementation** - Professional development/release branch strategy

@@ -760,7 +772,7 @@ uv run pytest tests/test_mcp_server_initialization.py -v
  **Verification environment:**
  - Operating systems: Windows 10, macOS, Linux
  - Python version: 3.10+
- - Project version: tree-sitter-analyzer v1.7.0
+ - Project version: tree-sitter-analyzer v1.7.2
  - Test files: BigService.java (1419 lines), sample.py (256 lines), MultiClass.java (54 lines)

  ---

*.dist-info/RECORD
@@ -1,4 +1,4 @@
- tree_sitter_analyzer/__init__.py,sha256=Ef0vV4IRflLl-9sQtumxc31etSQ9V6jofhzbUOHp1zQ,3067
+ tree_sitter_analyzer/__init__.py,sha256=kz0kyobXXhmOq8JWuJHjr3MhC9OfOGOJ5CAcq7e_0d4,3067
  tree_sitter_analyzer/__main__.py,sha256=Zl79tpe4UaMu-7yeztc06tgP0CVMRnvGgas4ZQP5SCs,228
  tree_sitter_analyzer/api.py,sha256=jzwID6fJNdhQkJP3D0lzBVPhOnGIN4tyyMtmRYdK9zI,22753
  tree_sitter_analyzer/cli_main.py,sha256=BuaM-L-Jx3G49qvAUOQVsw0wEM-X0UzPaRszRZBist4,10374
@@ -64,11 +64,11 @@ tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py,sha256=JyS9gey2oFoWjzsiiLjw
  tree_sitter_analyzer/mcp/tools/analyze_scale_tool_cli_compatible.py,sha256=mssed7bEfGeGxW4mOf7dg8BDS1oqHLolIBNX9DaZ3DM,8997
  tree_sitter_analyzer/mcp/tools/base_tool.py,sha256=qf2My325azlnKOugNVMN_R1jtZcjXVy354sGVKzvZls,3546
  tree_sitter_analyzer/mcp/tools/fd_rg_utils.py,sha256=R1ICH40vkWO3OdKZjxok9ptQZpZ6-tM5SkLHHOC4-BE,17749
- tree_sitter_analyzer/mcp/tools/find_and_grep_tool.py,sha256=G-aExFZUvpJtitmEfYAClsHmQ1p3HqsT4IkOlym9R_o,22100
- tree_sitter_analyzer/mcp/tools/list_files_tool.py,sha256=TA1BRQtb-D5x1pD-IcRJYnP0WnnFfl9q7skI25MOdHk,12873
+ tree_sitter_analyzer/mcp/tools/find_and_grep_tool.py,sha256=2Y5V61T4-Y90W38erzIN6D1orlYlkPx9kaoKTfmInEs,31038
+ tree_sitter_analyzer/mcp/tools/list_files_tool.py,sha256=JxgLpFgJFk6_9VtlvzOlzWnfYs9yZOPIVwnl10pRoZU,17864
  tree_sitter_analyzer/mcp/tools/query_tool.py,sha256=1xY1ONNY2sIFJxoILlnNzBnwGVgzEF7vVJ2ccqR9auA,10879
  tree_sitter_analyzer/mcp/tools/read_partial_tool.py,sha256=BMAJF205hTIrYTQJG6N1-vVuKSby2CSm9nWzSMMWceI,11339
- tree_sitter_analyzer/mcp/tools/search_content_tool.py,sha256=PDYY_O7T0y4bnC6JNjtL1_TyZcib0EpxnPA6PfKueoQ,22489
+ tree_sitter_analyzer/mcp/tools/search_content_tool.py,sha256=2HbHxRzPkddXPgwKrx8ACZMuVF6R9fLZXM_EWILu-l4,31994
  tree_sitter_analyzer/mcp/tools/table_format_tool.py,sha256=VhmrDSKE5WdnoJhWjZc67z5q3qiBMBhNFW9uZ22psU8,21252
  tree_sitter_analyzer/mcp/tools/universal_analyze_tool.py,sha256=-zZnqN9WcoyRTKM_16ADH859LSebzi34BGYwQL2zCOs,25084
  tree_sitter_analyzer/mcp/utils/__init__.py,sha256=TgTTKsRJAqF95g1fAp5SR_zQVDkImpc_5R0Dw529UUw,3126
@@ -89,7 +89,7 @@ tree_sitter_analyzer/security/__init__.py,sha256=ZTqTt24hsljCpTXAZpJC57L7MU5lJLT
  tree_sitter_analyzer/security/boundary_manager.py,sha256=3eeENRKWtz2pyZHzd8DiVaq8fdeC6s1eVOuBylSmQPg,9347
  tree_sitter_analyzer/security/regex_checker.py,sha256=jWK6H8PTPgzbwRPfK_RZ8bBTS6rtEbgjY5vr3YWjQ_U,9616
  tree_sitter_analyzer/security/validator.py,sha256=yR4qTWEcXpR--bSFwtWvSgY0AzqujOFAqlc1Z7dlTdk,9809
- tree_sitter_analyzer-1.7.1.dist-info/METADATA,sha256=AcTmJrBzB1FGvHkyUK9ic-9bR5HVuEZnExYb69aEBt8,36459
- tree_sitter_analyzer-1.7.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- tree_sitter_analyzer-1.7.1.dist-info/entry_points.txt,sha256=gbMks8mHqDZWyGnawAW2uNU5g-mrD1vLBiIoLvnaJP0,780
- tree_sitter_analyzer-1.7.1.dist-info/RECORD,,
+ tree_sitter_analyzer-1.7.2.dist-info/METADATA,sha256=1JDW31p0z4DGtk18wP8h9Z7idPGSQvr9BbS-4wcz0-U,37336
+ tree_sitter_analyzer-1.7.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ tree_sitter_analyzer-1.7.2.dist-info/entry_points.txt,sha256=gbMks8mHqDZWyGnawAW2uNU5g-mrD1vLBiIoLvnaJP0,780
+ tree_sitter_analyzer-1.7.2.dist-info/RECORD,,