tree-sitter-analyzer 1.7.5__py3-none-any.whl → 1.7.7__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.


This version of tree-sitter-analyzer might be problematic; see the registry's advisory page for this release for more details.

@@ -11,7 +11,7 @@ Architecture:
11
11
  - Data Models: Generic and language-specific code element representations
12
12
  """
13
13
 
14
- __version__ = "1.7.5"
14
+ __version__ = "1.7.7"
15
15
  __author__ = "aisheng.yu"
16
16
  __email__ = "aimasteracc@gmail.com"
17
17
 
@@ -11,6 +11,7 @@ import logging
11
11
  from pathlib import Path
12
12
  from typing import Any
13
13
 
14
+ from . import __version__
14
15
  from .core.engine import AnalysisEngine
15
16
  from .utils import log_error
16
17
 
@@ -500,7 +501,7 @@ def get_framework_info() -> dict[str, Any]:
500
501
 
501
502
  return {
502
503
  "name": "tree-sitter-analyzer",
503
- "version": "2.0.0", # New architecture version
504
+ "version": __version__,
504
505
  "supported_languages": engine.get_supported_languages(),
505
506
  "total_languages": len(engine.get_supported_languages()),
506
507
  "plugin_info": {
@@ -521,7 +522,7 @@ def get_framework_info() -> dict[str, Any]:
521
522
  }
522
523
  except Exception as e:
523
524
  log_error(f"Failed to get framework info: {e}")
524
- return {"name": "tree-sitter-analyzer", "version": "2.0.0", "error": str(e)}
525
+ return {"name": "tree-sitter-analyzer", "version": __version__, "error": str(e)}
525
526
 
526
527
 
527
528
  def execute_query(
@@ -398,3 +398,337 @@ class RegexSecurityError(SecurityError):
398
398
  )
399
399
  self.pattern = pattern
400
400
  self.dangerous_construct = dangerous_construct
401
+
402
+
403
+ # MCP-specific exceptions for enhanced error handling
404
+ class MCPToolError(MCPError):
405
+ """Raised when MCP tool execution fails."""
406
+
407
+ def __init__(
408
+ self,
409
+ message: str,
410
+ tool_name: str | None = None,
411
+ input_params: dict[str, Any] | None = None,
412
+ execution_stage: str | None = None,
413
+ **kwargs: Any,
414
+ ) -> None:
415
+ context = kwargs.get("context", {})
416
+ if input_params:
417
+ # Sanitize sensitive information from input params
418
+ sanitized_params = self._sanitize_params(input_params)
419
+ context["input_params"] = sanitized_params
420
+ if execution_stage:
421
+ context["execution_stage"] = execution_stage
422
+
423
+ super().__init__(message, tool_name=tool_name, context=context, **kwargs)
424
+ self.input_params = input_params
425
+ self.execution_stage = execution_stage
426
+
427
+ @staticmethod
428
+ def _sanitize_params(params: dict[str, Any]) -> dict[str, Any]:
429
+ """Sanitize sensitive information from parameters."""
430
+ sanitized = {}
431
+ sensitive_keys = {"password", "token", "key", "secret", "auth", "credential"}
432
+
433
+ for key, value in params.items():
434
+ if any(sensitive in key.lower() for sensitive in sensitive_keys):
435
+ sanitized[key] = "***REDACTED***"
436
+ elif isinstance(value, str) and len(value) > 100:
437
+ sanitized[key] = value[:100] + "...[TRUNCATED]"
438
+ else:
439
+ sanitized[key] = value
440
+
441
+ return sanitized
442
+
443
+
444
+ class MCPResourceError(MCPError):
445
+ """Raised when MCP resource access fails."""
446
+
447
+ def __init__(
448
+ self,
449
+ message: str,
450
+ resource_uri: str | None = None,
451
+ resource_type: str | None = None,
452
+ access_mode: str | None = None,
453
+ **kwargs: Any,
454
+ ) -> None:
455
+ context = kwargs.get("context", {})
456
+ if resource_type:
457
+ context["resource_type"] = resource_type
458
+ if access_mode:
459
+ context["access_mode"] = access_mode
460
+
461
+ super().__init__(message, resource_uri=resource_uri, context=context, **kwargs)
462
+ self.resource_type = resource_type
463
+ self.access_mode = access_mode
464
+
465
+
466
+ class MCPTimeoutError(MCPError):
467
+ """Raised when MCP operation times out."""
468
+
469
+ def __init__(
470
+ self,
471
+ message: str,
472
+ timeout_seconds: float | None = None,
473
+ operation_type: str | None = None,
474
+ **kwargs: Any,
475
+ ) -> None:
476
+ context = kwargs.get("context", {})
477
+ if timeout_seconds:
478
+ context["timeout_seconds"] = timeout_seconds
479
+ if operation_type:
480
+ context["operation_type"] = operation_type
481
+
482
+ super().__init__(message, context=context, **kwargs)
483
+ self.timeout_seconds = timeout_seconds
484
+ self.operation_type = operation_type
485
+
486
+
487
+ class MCPValidationError(ValidationError):
488
+ """Raised when MCP input validation fails."""
489
+
490
+ def __init__(
491
+ self,
492
+ message: str,
493
+ tool_name: str | None = None,
494
+ parameter_name: str | None = None,
495
+ parameter_value: Any | None = None,
496
+ validation_rule: str | None = None,
497
+ **kwargs: Any,
498
+ ) -> None:
499
+ context = kwargs.get("context", {})
500
+ if tool_name:
501
+ context["tool_name"] = tool_name
502
+ if parameter_name:
503
+ context["parameter_name"] = parameter_name
504
+ if validation_rule:
505
+ context["validation_rule"] = validation_rule
506
+
507
+ # Sanitize parameter value for logging
508
+ if parameter_value is not None:
509
+ if isinstance(parameter_value, str) and len(parameter_value) > 200:
510
+ context["parameter_value"] = parameter_value[:200] + "...[TRUNCATED]"
511
+ else:
512
+ context["parameter_value"] = parameter_value
513
+
514
+ super().__init__(message, validation_type="mcp_parameter", context=context, **kwargs)
515
+ self.tool_name = tool_name
516
+ self.parameter_name = parameter_name
517
+ self.validation_rule = validation_rule
518
+
519
+
520
+ class FileRestrictionError(SecurityError):
521
+ """Raised when file access is restricted by mode or security policy."""
522
+
523
+ def __init__(
524
+ self,
525
+ message: str,
526
+ file_path: str | Path | None = None,
527
+ current_mode: str | None = None,
528
+ allowed_patterns: list[str] | None = None,
529
+ **kwargs: Any,
530
+ ) -> None:
531
+ context = kwargs.get("context", {})
532
+ if current_mode:
533
+ context["current_mode"] = current_mode
534
+ if allowed_patterns:
535
+ context["allowed_patterns"] = allowed_patterns
536
+
537
+ super().__init__(
538
+ message,
539
+ security_type="file_restriction",
540
+ file_path=file_path,
541
+ context=context,
542
+ **kwargs
543
+ )
544
+ self.current_mode = current_mode
545
+ self.allowed_patterns = allowed_patterns
546
+
547
+
548
+ # Enhanced error response utilities for MCP
549
+ def create_mcp_error_response(
550
+ exception: Exception,
551
+ tool_name: str | None = None,
552
+ include_debug_info: bool = False,
553
+ sanitize_sensitive: bool = True,
554
+ ) -> dict[str, Any]:
555
+ """
556
+ Create standardized MCP error response dictionary.
557
+
558
+ Args:
559
+ exception: The exception to convert
560
+ tool_name: Name of the MCP tool that failed
561
+ include_debug_info: Whether to include debug information
562
+ sanitize_sensitive: Whether to sanitize sensitive information
563
+
564
+ Returns:
565
+ MCP-compliant error response dictionary
566
+ """
567
+ import traceback
568
+
569
+ response: dict[str, Any] = {
570
+ "success": False,
571
+ "error": {
572
+ "type": exception.__class__.__name__,
573
+ "message": str(exception),
574
+ "timestamp": __import__("datetime").datetime.utcnow().isoformat() + "Z"
575
+ }
576
+ }
577
+
578
+ # Add tool name if provided
579
+ if tool_name:
580
+ response["error"]["tool"] = tool_name
581
+
582
+ # Add context if available
583
+ if hasattr(exception, "context") and exception.context:
584
+ context = exception.context.copy()
585
+
586
+ # Sanitize sensitive information if requested
587
+ if sanitize_sensitive:
588
+ context = _sanitize_error_context(context)
589
+
590
+ response["error"]["context"] = context
591
+
592
+ # Add error code if available
593
+ if hasattr(exception, "error_code"):
594
+ response["error"]["code"] = exception.error_code
595
+
596
+ # Add debug information if requested
597
+ if include_debug_info:
598
+ response["error"]["debug"] = {
599
+ "traceback": traceback.format_exc(),
600
+ "exception_args": list(exception.args) if exception.args else []
601
+ }
602
+
603
+ # Add specific error details for known exception types
604
+ if isinstance(exception, MCPToolError):
605
+ response["error"]["execution_stage"] = exception.execution_stage
606
+ elif isinstance(exception, MCPTimeoutError):
607
+ response["error"]["timeout_seconds"] = exception.timeout_seconds
608
+ elif isinstance(exception, FileRestrictionError):
609
+ response["error"]["current_mode"] = exception.current_mode
610
+ response["error"]["allowed_patterns"] = exception.allowed_patterns
611
+
612
+ return response
613
+
614
+
615
+ def _sanitize_error_context(context: dict[str, Any]) -> dict[str, Any]:
616
+ """Sanitize sensitive information from error context."""
617
+ sanitized = {}
618
+ sensitive_keys = {
619
+ "password", "token", "key", "secret", "auth", "credential",
620
+ "api_key", "access_token", "private_key", "session_id"
621
+ }
622
+
623
+ for key, value in context.items():
624
+ if any(sensitive in key.lower() for sensitive in sensitive_keys):
625
+ sanitized[key] = "***REDACTED***"
626
+ elif isinstance(value, str) and len(value) > 500:
627
+ sanitized[key] = value[:500] + "...[TRUNCATED]"
628
+ elif isinstance(value, (list, tuple)) and len(value) > 10:
629
+ sanitized[key] = list(value[:10]) + ["...[TRUNCATED]"]
630
+ elif isinstance(value, dict) and len(value) > 20:
631
+ # Recursively sanitize nested dictionaries
632
+ truncated_dict = dict(list(value.items())[:20])
633
+ sanitized[key] = _sanitize_error_context(truncated_dict)
634
+ sanitized[key]["__truncated__"] = True
635
+ else:
636
+ sanitized[key] = value
637
+
638
+ return sanitized
639
+
640
+
641
+ # Async exception handling utilities for MCP tools
642
+ async def safe_execute_async(
643
+ coro: Any,
644
+ default_return: Any = None,
645
+ exception_types: tuple[type[Exception], ...] = (Exception,),
646
+ log_errors: bool = True,
647
+ tool_name: str | None = None,
648
+ ) -> Any:
649
+ """
650
+ Safely execute an async function with exception handling.
651
+
652
+ Args:
653
+ coro: Coroutine to execute
654
+ default_return: Value to return on exception
655
+ exception_types: Exception types to catch
656
+ log_errors: Whether to log errors
657
+ tool_name: Name of the tool for error context
658
+
659
+ Returns:
660
+ Coroutine result or default_return on exception
661
+ """
662
+ try:
663
+ return await coro
664
+ except exception_types as e:
665
+ if log_errors:
666
+ from .utils import log_error
667
+
668
+ error_context = {"tool_name": tool_name} if tool_name else {}
669
+ log_error(f"Async execution failed: {e}", extra=error_context)
670
+
671
+ return default_return
672
+
673
+
674
+ def mcp_exception_handler(
675
+ tool_name: str,
676
+ include_debug: bool = False,
677
+ sanitize_sensitive: bool = True,
678
+ ) -> Any:
679
+ """
680
+ Decorator for MCP tool exception handling.
681
+
682
+ Args:
683
+ tool_name: Name of the MCP tool
684
+ include_debug: Whether to include debug information
685
+ sanitize_sensitive: Whether to sanitize sensitive information
686
+ """
687
+ def decorator(func: Any) -> Any:
688
+ async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
689
+ try:
690
+ return await func(*args, **kwargs)
691
+ except Exception as e:
692
+ from .utils import log_error
693
+
694
+ # Log the error with tool context
695
+ log_error(
696
+ f"MCP tool '{tool_name}' failed: {e}",
697
+ extra={"tool_name": tool_name, "exception_type": type(e).__name__}
698
+ )
699
+
700
+ # Return standardized error response
701
+ return create_mcp_error_response(
702
+ e,
703
+ tool_name=tool_name,
704
+ include_debug_info=include_debug,
705
+ sanitize_sensitive=sanitize_sensitive
706
+ )
707
+
708
+ def sync_wrapper(*args: Any, **kwargs: Any) -> Any:
709
+ try:
710
+ return func(*args, **kwargs)
711
+ except Exception as e:
712
+ from .utils import log_error
713
+
714
+ # Log the error with tool context
715
+ log_error(
716
+ f"MCP tool '{tool_name}' failed: {e}",
717
+ extra={"tool_name": tool_name, "exception_type": type(e).__name__}
718
+ )
719
+
720
+ # Return standardized error response
721
+ return create_mcp_error_response(
722
+ e,
723
+ tool_name=tool_name,
724
+ include_debug_info=include_debug,
725
+ sanitize_sensitive=sanitize_sensitive
726
+ )
727
+
728
+ # Return appropriate wrapper based on function type
729
+ if __import__("asyncio").iscoroutinefunction(func):
730
+ return async_wrapper
731
+ else:
732
+ return sync_wrapper
733
+
734
+ return decorator
@@ -8,7 +8,22 @@ This module provides file reading functionality with encoding detection and fall
8
8
  from pathlib import Path
9
9
 
10
10
  from .encoding_utils import read_file_safe
11
- from .utils import log_error, log_info, log_warning
11
+ from .utils import setup_logger
12
+
13
+ # Set up logger for this module
14
+ logger = setup_logger(__name__)
15
+
16
+ def log_error(message: str, *args, **kwargs) -> None:
17
+ """Log error message"""
18
+ logger.error(message, *args, **kwargs)
19
+
20
+ def log_info(message: str, *args, **kwargs) -> None:
21
+ """Log info message"""
22
+ logger.info(message, *args, **kwargs)
23
+
24
+ def log_warning(message: str, *args, **kwargs) -> None:
25
+ """Log warning message"""
26
+ logger.warning(message, *args, **kwargs)
12
27
 
13
28
 
14
29
  def detect_language_from_extension(file_path: str) -> str:
@@ -12,6 +12,8 @@ import logging
12
12
  import sys
13
13
  from typing import Any
14
14
 
15
+ from .. import __version__
16
+
15
17
  try:
16
18
  from mcp.server import Server
17
19
  from mcp.server.models import InitializationOptions
@@ -68,7 +70,7 @@ class TreeSitterAnalyzerMCPServer:
68
70
 
69
71
  self.server: Server | None = None
70
72
  self.name = "tree-sitter-analyzer"
71
- self.version = "2.0.0"
73
+ self.version = __version__
72
74
 
73
75
  log_info(f"Initializing {self.name} v{self.version}")
74
76
 
@@ -66,6 +66,10 @@ class LanguageDetector:
66
66
  ".mkd": "markdown",
67
67
  ".mkdn": "markdown",
68
68
  ".mdx": "markdown",
69
+ # JSON系
70
+ ".json": "json",
71
+ ".jsonc": "json",
72
+ ".json5": "json",
69
73
  }
70
74
 
71
75
  # Ambiguous extensions (map to multiple languages)
@@ -100,6 +104,7 @@ class LanguageDetector:
100
104
  "rust",
101
105
  "go",
102
106
  "markdown",
107
+ "json",
103
108
  }
104
109
 
105
110
  def __init__(self) -> None:
@@ -143,6 +148,10 @@ class LanguageDetector:
143
148
  ".mkd": ("markdown", 0.8),
144
149
  ".mkdn": ("markdown", 0.8),
145
150
  ".mdx": ("markdown", 0.7), # MDX might be mixed with JSX
151
+ # JSON extensions
152
+ ".json": ("json", 0.9),
153
+ ".jsonc": ("json", 0.8), # JSON with comments
154
+ ".json5": ("json", 0.8), # JSON5 format
146
155
  }
147
156
 
148
157
  # Content-based detection patterns
@@ -412,7 +421,9 @@ def detect_language_from_file(file_path: str) -> str:
412
421
  Returns:
413
422
  Detected language name
414
423
  """
415
- return detector.detect_from_extension(file_path)
424
+ # Create a fresh instance to ensure latest configuration
425
+ fresh_detector = LanguageDetector()
426
+ return fresh_detector.detect_from_extension(file_path)
416
427
 
417
428
 
418
429
  def is_language_supported(language: str) -> bool:
@@ -379,19 +379,27 @@ class AnalyzeScaleTool(BaseMCPTool):
379
379
  include_details = arguments.get("include_details", False)
380
380
  include_guidance = arguments.get("include_guidance", True)
381
381
 
382
+ # Security validation BEFORE path resolution to catch symlinks
383
+ is_valid, error_msg = self.security_validator.validate_file_path(file_path)
384
+ if not is_valid:
385
+ logger.warning(
386
+ f"Security validation failed for file path: {file_path} - {error_msg}"
387
+ )
388
+ raise ValueError(f"Invalid file path: {error_msg}")
389
+
382
390
  # Resolve file path to absolute path
383
391
  resolved_file_path = self.path_resolver.resolve(file_path)
384
392
  logger.info(f"Analyzing file: {file_path} (resolved to: {resolved_file_path})")
385
393
 
386
- # Security validation using resolved path
394
+ # Additional security validation on resolved path
387
395
  is_valid, error_msg = self.security_validator.validate_file_path(
388
396
  resolved_file_path
389
397
  )
390
398
  if not is_valid:
391
399
  logger.warning(
392
- f"Security validation failed for file path: {resolved_file_path} - {error_msg}"
400
+ f"Security validation failed for resolved path: {resolved_file_path} - {error_msg}"
393
401
  )
394
- raise ValueError(f"Invalid file path: {error_msg}")
402
+ raise ValueError(f"Invalid resolved path: {error_msg}")
395
403
 
396
404
  # Sanitize inputs
397
405
  if language:
@@ -423,6 +431,12 @@ class AnalyzeScaleTool(BaseMCPTool):
423
431
  # Calculate basic file metrics
424
432
  file_metrics = self._calculate_file_metrics(resolved_file_path)
425
433
 
434
+ # Handle JSON files specially - they don't need structural analysis
435
+ if language == "json":
436
+ return self._create_json_file_analysis(
437
+ resolved_file_path, file_metrics, include_guidance
438
+ )
439
+
426
440
  # Use appropriate analyzer based on language
427
441
  if language == "java":
428
442
  # Use AdvancedAnalyzer for comprehensive analysis
@@ -472,6 +486,7 @@ class AnalyzeScaleTool(BaseMCPTool):
472
486
 
473
487
  # Build enhanced result structure
474
488
  result = {
489
+ "success": True,
475
490
  "file_path": file_path,
476
491
  "language": language,
477
492
  "file_metrics": file_metrics,
@@ -688,6 +703,56 @@ class AnalyzeScaleTool(BaseMCPTool):
688
703
 
689
704
  return True
690
705
 
706
+ def _create_json_file_analysis(
707
+ self, file_path: str, file_metrics: dict[str, Any], include_guidance: bool
708
+ ) -> dict[str, Any]:
709
+ """
710
+ Create analysis result for JSON files.
711
+
712
+ Args:
713
+ file_path: Path to the JSON file
714
+ file_metrics: Basic file metrics
715
+ include_guidance: Whether to include guidance
716
+
717
+ Returns:
718
+ Analysis result for JSON file
719
+ """
720
+ result = {
721
+ "success": True,
722
+ "file_path": file_path,
723
+ "language": "json",
724
+ "file_size_bytes": file_metrics["file_size_bytes"],
725
+ "total_lines": file_metrics["total_lines"],
726
+ "non_empty_lines": file_metrics["total_lines"] - file_metrics["blank_lines"],
727
+ "estimated_tokens": file_metrics["estimated_tokens"],
728
+ "complexity_metrics": {
729
+ "total_elements": 0,
730
+ "max_depth": 0,
731
+ "avg_complexity": 0.0,
732
+ },
733
+ "structural_overview": {
734
+ "classes": [],
735
+ "methods": [],
736
+ "fields": [],
737
+ },
738
+ "scale_category": "small" if file_metrics["total_lines"] < 100 else "medium" if file_metrics["total_lines"] < 1000 else "large",
739
+ "analysis_recommendations": {
740
+ "suitable_for_full_analysis": file_metrics["total_lines"] < 1000,
741
+ "recommended_approach": "JSON files are configuration/data files - structural analysis not applicable",
742
+ "token_efficiency_notes": "JSON files can be read directly without tree-sitter parsing",
743
+ },
744
+ }
745
+
746
+ if include_guidance:
747
+ result["llm_analysis_guidance"] = {
748
+ "file_characteristics": "JSON configuration/data file",
749
+ "recommended_workflow": "Direct file reading for content analysis",
750
+ "token_optimization": "Use simple file reading tools for JSON content",
751
+ "analysis_focus": "Data structure and configuration values",
752
+ }
753
+
754
+ return result
755
+
691
756
  def get_tool_definition(self) -> dict[str, Any]:
692
757
  """
693
758
  Get the MCP tool definition for check_code_scale.
@@ -11,6 +11,7 @@ from __future__ import annotations
11
11
  import asyncio
12
12
  import json
13
13
  import os
14
+ import shutil
14
15
  import tempfile
15
16
  from dataclasses import dataclass
16
17
  from pathlib import Path
@@ -27,6 +28,21 @@ DEFAULT_RG_TIMEOUT_MS = 4000
27
28
  RG_TIMEOUT_HARD_CAP_MS = 30000
28
29
 
29
30
 
31
+ def check_external_command(command: str) -> bool:
32
+ """Check if an external command is available in the system PATH."""
33
+ return shutil.which(command) is not None
34
+
35
+
36
+ def get_missing_commands() -> list[str]:
37
+ """Get list of missing external commands required by fd/rg tools."""
38
+ missing = []
39
+ if not check_external_command("fd"):
40
+ missing.append("fd")
41
+ if not check_external_command("rg"):
42
+ missing.append("rg")
43
+ return missing
44
+
45
+
30
46
  def clamp_int(value: int | None, default_value: int, hard_cap: int) -> int:
31
47
  if value is None:
32
48
  return default_value
@@ -64,13 +80,22 @@ async def run_command_capture(
64
80
  Returns (returncode, stdout, stderr). On timeout, kills process and returns 124.
65
81
  Separated into a util for easy monkeypatching in tests.
66
82
  """
67
- # Create process
68
- proc = await asyncio.create_subprocess_exec(
69
- *cmd,
70
- stdin=asyncio.subprocess.PIPE if input_data is not None else None,
71
- stdout=asyncio.subprocess.PIPE,
72
- stderr=asyncio.subprocess.PIPE,
73
- )
83
+ # Check if command exists before attempting to run
84
+ if cmd and not check_external_command(cmd[0]):
85
+ error_msg = f"Command '{cmd[0]}' not found in PATH. Please install {cmd[0]} to use this functionality."
86
+ return 127, b"", error_msg.encode()
87
+
88
+ try:
89
+ # Create process
90
+ proc = await asyncio.create_subprocess_exec(
91
+ *cmd,
92
+ stdin=asyncio.subprocess.PIPE if input_data is not None else None,
93
+ stdout=asyncio.subprocess.PIPE,
94
+ stderr=asyncio.subprocess.PIPE,
95
+ )
96
+ except FileNotFoundError as e:
97
+ error_msg = f"Command '{cmd[0]}' not found: {e}"
98
+ return 127, b"", error_msg.encode()
74
99
 
75
100
  # Compute timeout seconds
76
101
  timeout_s: float | None = None
@@ -239,6 +239,16 @@ class FindAndGrepTool(BaseMCPTool):
239
239
 
240
240
  @handle_mcp_errors("find_and_grep")
241
241
  async def execute(self, arguments: dict[str, Any]) -> dict[str, Any]:
242
+ # Check if both fd and rg commands are available
243
+ missing_commands = fd_rg_utils.get_missing_commands()
244
+ if missing_commands:
245
+ return {
246
+ "success": False,
247
+ "error": f"Required commands not found: {', '.join(missing_commands)}. Please install fd (https://github.com/sharkdp/fd) and ripgrep (https://github.com/BurntSushi/ripgrep) to use this tool.",
248
+ "count": 0,
249
+ "results": []
250
+ }
251
+
242
252
  self.validate_arguments(arguments)
243
253
  roots = self._validate_roots(arguments["roots"]) # absolute validated
244
254
 
@@ -181,6 +181,15 @@ class ListFilesTool(BaseMCPTool):
181
181
 
182
182
  @handle_mcp_errors("list_files")
183
183
  async def execute(self, arguments: dict[str, Any]) -> dict[str, Any]:
184
+ # Check if fd command is available
185
+ if not fd_rg_utils.check_external_command("fd"):
186
+ return {
187
+ "success": False,
188
+ "error": "fd command not found. Please install fd (https://github.com/sharkdp/fd) to use this tool.",
189
+ "count": 0,
190
+ "results": []
191
+ }
192
+
184
193
  self.validate_arguments(arguments)
185
194
  roots = self._validate_roots(arguments["roots"]) # normalized absolutes
186
195
 
@@ -113,17 +113,24 @@ class QueryTool(BaseMCPTool):
113
113
  if not file_path:
114
114
  raise ValueError("file_path is required")
115
115
 
116
+ # Security validation BEFORE path resolution to catch symlinks
117
+ is_valid, error_msg = self.security_validator.validate_file_path(file_path)
118
+ if not is_valid:
119
+ raise ValueError(
120
+ f"Invalid or unsafe file path: {error_msg or file_path}"
121
+ )
122
+
116
123
  # Resolve file path to absolute path
117
124
  resolved_file_path = self.path_resolver.resolve(file_path)
118
125
  logger.info(f"Querying file: {file_path} (resolved to: {resolved_file_path})")
119
126
 
120
- # Security validation using resolved path
127
+ # Additional security validation on resolved path
121
128
  is_valid, error_msg = self.security_validator.validate_file_path(
122
129
  resolved_file_path
123
130
  )
124
131
  if not is_valid:
125
132
  raise ValueError(
126
- f"Invalid or unsafe file path: {error_msg or resolved_file_path}"
133
+ f"Invalid or unsafe resolved path: {error_msg or resolved_file_path}"
127
134
  )
128
135
 
129
136
  # Get query parameters
@@ -116,34 +116,70 @@ class ReadPartialTool(BaseMCPTool):
116
116
  suppress_output = arguments.get("suppress_output", False)
117
117
  output_format = arguments.get("format", "text")
118
118
 
119
+ # Security validation BEFORE path resolution to catch symlinks
120
+ is_valid, error_msg = self.security_validator.validate_file_path(file_path, self.project_root)
121
+ if not is_valid:
122
+ logger.warning(
123
+ f"Security validation failed for file path: {file_path} - {error_msg}"
124
+ )
125
+ return {
126
+ "success": False,
127
+ "error": f"Security validation failed: {error_msg}",
128
+ "file_path": file_path
129
+ }
130
+
119
131
  # Resolve file path using common path resolver
120
132
  resolved_path = self.path_resolver.resolve(file_path)
121
133
 
122
- # Security validation (validate resolved absolute path when possible)
123
- is_valid, error_msg = self.security_validator.validate_file_path(resolved_path)
134
+ # Additional security validation on resolved path
135
+ is_valid, error_msg = self.security_validator.validate_file_path(resolved_path, self.project_root)
124
136
  if not is_valid:
125
137
  logger.warning(
126
- f"Security validation failed for file path: {file_path} - {error_msg}"
138
+ f"Security validation failed for resolved path: {resolved_path} - {error_msg}"
127
139
  )
128
- raise ValueError(f"Invalid file path: {error_msg}")
140
+ return {
141
+ "success": False,
142
+ "error": f"Security validation failed for resolved path: {error_msg}",
143
+ "file_path": file_path
144
+ }
129
145
 
130
146
  # Validate file exists
131
147
  if not Path(resolved_path).exists():
132
- raise ValueError("Invalid file path: file does not exist")
148
+ return {
149
+ "success": False,
150
+ "error": "Invalid file path: file does not exist",
151
+ "file_path": file_path
152
+ }
133
153
 
134
154
  # Validate line numbers
135
155
  if start_line < 1:
136
- raise ValueError("start_line must be >= 1")
156
+ return {
157
+ "success": False,
158
+ "error": "start_line must be >= 1",
159
+ "file_path": file_path
160
+ }
137
161
 
138
162
  if end_line is not None and end_line < start_line:
139
- raise ValueError("end_line must be >= start_line")
163
+ return {
164
+ "success": False,
165
+ "error": "end_line must be >= start_line",
166
+ "file_path": file_path
167
+ }
140
168
 
141
169
  # Validate column numbers
142
170
  if start_column is not None and start_column < 0:
143
- raise ValueError("start_column must be >= 0")
171
+ return {
172
+ "success": False,
173
+ "error": "start_column must be >= 0",
174
+ "file_path": file_path
175
+ }
144
176
 
145
177
  if end_column is not None and end_column < 0:
146
- raise ValueError("end_column must be >= 0")
178
+ return {
179
+ "success": False,
180
+ "error": "end_column must be >= 0",
181
+ "file_path": file_path
182
+ }
147
183
 
148
184
  logger.info(
149
185
  f"Reading partial content from {file_path}: lines {start_line}-{end_line or 'end'}"
@@ -160,9 +196,19 @@ class ReadPartialTool(BaseMCPTool):
160
196
  )
161
197
 
162
198
  if content is None:
163
- raise RuntimeError(
164
- f"Failed to read partial content from file: {file_path}"
165
- )
199
+ return {
200
+ "success": False,
201
+ "error": f"Failed to read partial content from file: {file_path}",
202
+ "file_path": file_path
203
+ }
204
+
205
+ # Check if content is empty or invalid range
206
+ if not content or content.strip() == "":
207
+ return {
208
+ "success": False,
209
+ "error": f"Invalid line range or empty content: start_line={start_line}, end_line={end_line}",
210
+ "file_path": file_path
211
+ }
166
212
 
167
213
  # Build result structure compatible with CLI --partial-read format
168
214
  result_data = {
@@ -198,8 +244,14 @@ class ReadPartialTool(BaseMCPTool):
198
244
  f"Successfully read {len(content)} characters from {file_path}"
199
245
  )
200
246
 
247
+ # Calculate lines extracted
248
+ lines_extracted = len(content.split('\n')) if content else 0
249
+ if end_line:
250
+ lines_extracted = end_line - start_line + 1
251
+
201
252
  # Build result - conditionally include partial_content_result based on suppress_output
202
253
  result = {
254
+ "success": True,
203
255
  "file_path": file_path,
204
256
  "range": {
205
257
  "start_line": start_line,
@@ -208,11 +260,39 @@ class ReadPartialTool(BaseMCPTool):
208
260
  "end_column": end_column,
209
261
  },
210
262
  "content_length": len(content),
263
+ "lines_extracted": lines_extracted,
211
264
  }
212
265
 
213
266
  # Only include partial_content_result if not suppressed or no output file specified
214
267
  if not suppress_output or not output_file:
215
- result["partial_content_result"] = cli_output
268
+ if output_format == "json":
269
+ # For JSON format, return structured data with exact line count
270
+ lines = content.split('\n') if content else []
271
+
272
+ # If end_line is specified, ensure we return exactly the requested number of lines
273
+ if end_line and len(lines) > lines_extracted:
274
+ lines = lines[:lines_extracted]
275
+ elif end_line and len(lines) < lines_extracted:
276
+ # Pad with empty lines if needed (shouldn't normally happen)
277
+ lines.extend([''] * (lines_extracted - len(lines)))
278
+
279
+ result["partial_content_result"] = {
280
+ "lines": lines,
281
+ "metadata": {
282
+ "file_path": file_path,
283
+ "range": {
284
+ "start_line": start_line,
285
+ "end_line": end_line,
286
+ "start_column": start_column,
287
+ "end_column": end_column,
288
+ },
289
+ "content_length": len(content),
290
+ "lines_count": len(lines)
291
+ }
292
+ }
293
+ else:
294
+ # For text/raw format, return CLI-compatible string
295
+ result["partial_content_result"] = cli_output
216
296
 
217
297
  # Handle file output if requested
218
298
  if output_file:
@@ -254,7 +334,11 @@ class ReadPartialTool(BaseMCPTool):
254
334
 
255
335
  except Exception as e:
256
336
  logger.error(f"Error reading partial content from {file_path}: {e}")
257
- raise
337
+ return {
338
+ "success": False,
339
+ "error": str(e),
340
+ "file_path": file_path
341
+ }
258
342
 
259
343
  def _read_file_partial(
260
344
  self,
@@ -289,6 +289,15 @@ class SearchContentTool(BaseMCPTool):
289
289
 
290
290
  @handle_mcp_errors("search_content")
291
291
  async def execute(self, arguments: dict[str, Any]) -> dict[str, Any] | int:
292
+ # Check if rg command is available
293
+ if not fd_rg_utils.check_external_command("rg"):
294
+ return {
295
+ "success": False,
296
+ "error": "rg (ripgrep) command not found. Please install ripgrep (https://github.com/BurntSushi/ripgrep) to use this tool.",
297
+ "count": 0,
298
+ "results": []
299
+ }
300
+
292
301
  self.validate_arguments(arguments)
293
302
 
294
303
  roots = arguments.get("roots")
@@ -242,6 +242,7 @@ class TableFormatTool(BaseMCPTool):
242
242
  package_info = {"name": packages[0].name}
243
243
 
244
244
  return {
245
+ "success": True,
245
246
  "file_path": result.file_path,
246
247
  "language": result.language,
247
248
  "package": package_info,
@@ -378,18 +379,26 @@ class TableFormatTool(BaseMCPTool):
378
379
  output_file = args.get("output_file")
379
380
  suppress_output = args.get("suppress_output", False)
380
381
 
382
+ # Security validation BEFORE path resolution to catch symlinks
383
+ is_valid, error_msg = self.security_validator.validate_file_path(file_path)
384
+ if not is_valid:
385
+ self.logger.warning(
386
+ f"Security validation failed for file path: {file_path} - {error_msg}"
387
+ )
388
+ raise ValueError(f"Invalid file path: {error_msg}")
389
+
381
390
  # Resolve file path using common path resolver
382
391
  resolved_path = self.path_resolver.resolve(file_path)
383
392
 
384
- # Security validation
393
+ # Additional security validation on resolved path
385
394
  is_valid, error_msg = self.security_validator.validate_file_path(
386
395
  resolved_path
387
396
  )
388
397
  if not is_valid:
389
398
  self.logger.warning(
390
- f"Security validation failed for file path: {file_path} - {error_msg}"
399
+ f"Security validation failed for resolved path: {resolved_path} - {error_msg}"
391
400
  )
392
- raise ValueError(f"Invalid file path: {error_msg}")
401
+ raise ValueError(f"Invalid resolved path: {error_msg}")
393
402
 
394
403
  # Sanitize format_type input
395
404
  if format_type:
@@ -470,6 +479,7 @@ class TableFormatTool(BaseMCPTool):
470
479
 
471
480
  # Build result - conditionally include table_output based on suppress_output
472
481
  result = {
482
+ "success": True,
473
483
  "format_type": format_type,
474
484
  "file_path": file_path,
475
485
  "language": language,
@@ -100,12 +100,13 @@ class SecurityValidator:
100
100
 
101
101
  # Layer 4: Absolute path check (cross-platform)
102
102
  if Path(file_path).is_absolute() or file_path.startswith(("/", "\\")):
103
+ log_debug(f"Processing absolute path: {file_path}")
103
104
  # If project boundaries are configured, enforce them strictly
104
105
  if self.boundary_manager and self.boundary_manager.project_root:
105
106
  if not self.boundary_manager.is_within_project(file_path):
106
107
  return False, "Absolute path must be within project directory"
107
- # Within project
108
- return True, ""
108
+ # Within project - continue with symlink checks
109
+ log_debug("Absolute path is within project, continuing with symlink checks")
109
110
  else:
110
111
  # In test/dev contexts without project boundaries, allow absolute
111
112
  # paths under system temp folder only (safe sandbox)
@@ -113,12 +114,13 @@ class SecurityValidator:
113
114
 
114
115
  temp_dir = Path(tempfile.gettempdir()).resolve()
115
116
  real_path = Path(file_path).resolve()
117
+ log_debug(f"Checking if {real_path} is under temp dir {temp_dir}")
116
118
  try:
117
119
  real_path.relative_to(temp_dir)
118
- return True, ""
120
+ log_debug("Path is under temp directory, continuing with symlink checks")
121
+ # Don't return here - continue with symlink checks
119
122
  except ValueError:
120
- pass
121
- return False, "Absolute file paths are not allowed"
123
+ return False, "Absolute file paths are not allowed"
122
124
 
123
125
  # Layer 5: Path normalization and traversal check
124
126
  norm_path = str(Path(file_path))
@@ -136,12 +138,69 @@ class SecurityValidator:
136
138
  "Access denied. File path must be within project directory",
137
139
  )
138
140
 
139
- # Layer 7: Symbolic link check (if file exists)
141
+ # Layer 7: Symbolic link and junction check (check both original and resolved paths)
142
+ # First check the original file_path directly for symlinks and junctions
143
+ try:
144
+ original_path = Path(file_path)
145
+ log_debug(f"Checking symlink status for original path: {original_path}")
146
+ # Check for symlinks even if the file doesn't exist yet (broken symlinks)
147
+ is_symlink = original_path.is_symlink()
148
+ log_debug(f"original_path.is_symlink() = {is_symlink}")
149
+ if is_symlink:
150
+ log_warning(f"Symbolic link detected in original path: {original_path}")
151
+ return False, "Symbolic links are not allowed"
152
+
153
+ # Additional check for Windows junctions and reparse points (only if exists)
154
+ if original_path.exists() and self._is_junction_or_reparse_point(original_path):
155
+ log_warning(f"Junction or reparse point detected in original path: {original_path}")
156
+ return False, "Junctions and reparse points are not allowed"
157
+
158
+ except (OSError, PermissionError) as e:
159
+ # If we can't check symlink status, continue with other checks
160
+ log_debug(f"Exception checking symlink status: {e}")
161
+ pass
162
+
163
+ # Then check the full path (base_path + norm_path) if base_path is provided
140
164
  if base_path:
141
165
  full_path = Path(base_path) / norm_path
142
- if full_path.exists() and full_path.is_symlink():
143
- log_warning(f"Symbolic link detected: {full_path}")
144
- return False, "Symbolic links are not allowed"
166
+
167
+ # Check if the full path is a symlink or junction
168
+ try:
169
+ # Check for symlinks even if the file doesn't exist yet (broken symlinks)
170
+ if full_path.is_symlink():
171
+ log_warning(f"Symbolic link detected: {full_path}")
172
+ return False, "Symbolic links are not allowed"
173
+
174
+ # Additional check for Windows junctions and reparse points (only if exists)
175
+ if full_path.exists() and self._is_junction_or_reparse_point(full_path):
176
+ log_warning(f"Junction or reparse point detected: {full_path}")
177
+ return False, "Junctions and reparse points are not allowed"
178
+
179
+ except (OSError, PermissionError):
180
+ # If we can't check symlink status due to permissions, be cautious
181
+ log_warning(f"Cannot verify symlink status for: {full_path}")
182
+ pass
183
+
184
+ # Check parent directories for junctions (Windows-specific security measure)
185
+ try:
186
+ if self._has_junction_in_path(full_path):
187
+ log_warning(f"Junction detected in path hierarchy: {full_path}")
188
+ return False, "Paths containing junctions are not allowed"
189
+ except (OSError, PermissionError):
190
+ # If we can't check parent directories, continue
191
+ pass
192
+ else:
193
+ # For absolute paths or when no base_path is provided, use original_path
194
+ full_path = original_path
195
+
196
+ # Check parent directories for junctions
197
+ try:
198
+ if self._has_junction_in_path(full_path):
199
+ log_warning(f"Junction detected in path hierarchy: {full_path}")
200
+ return False, "Paths containing junctions are not allowed"
201
+ except (OSError, PermissionError):
202
+ # If we can't check parent directories, continue
203
+ pass
145
204
 
146
205
  log_debug(f"File path validation passed: {file_path}")
147
206
  return True, ""
@@ -268,3 +327,103 @@ class SecurityValidator:
268
327
  except Exception as e:
269
328
  log_warning(f"Glob pattern validation error: {e}")
270
329
  return False, f"Validation error: {str(e)}"
330
+
331
+ def validate_path(self, path: str, base_path: str | None = None) -> tuple[bool, str]:
332
+ """
333
+ Alias for validate_file_path for backward compatibility.
334
+
335
+ Args:
336
+ path: Path to validate
337
+ base_path: Optional base path for relative path validation
338
+
339
+ Returns:
340
+ Tuple of (is_valid, error_message)
341
+ """
342
+ return self.validate_file_path(path, base_path)
343
+
344
+ def is_safe_path(self, path: str, base_path: str | None = None) -> bool:
345
+ """
346
+ Check if a path is safe (backward compatibility method).
347
+
348
+ Args:
349
+ path: Path to check
350
+ base_path: Optional base path for relative path validation
351
+
352
+ Returns:
353
+ True if path is safe, False otherwise
354
+ """
355
+ is_valid, _ = self.validate_file_path(path, base_path)
356
+ return is_valid
357
+
358
+ def _is_junction_or_reparse_point(self, path: Path) -> bool:
359
+ """
360
+ Check if a path is a Windows junction or reparse point.
361
+
362
+ Args:
363
+ path: Path to check
364
+
365
+ Returns:
366
+ True if the path is a junction or reparse point
367
+ """
368
+ try:
369
+ import platform
370
+ if platform.system() != "Windows":
371
+ return False
372
+
373
+ # On Windows, check for reparse points using stat
374
+ import stat
375
+ if path.exists():
376
+ path_stat = path.stat()
377
+ # Check if it has the reparse point attribute
378
+ if hasattr(stat, 'FILE_ATTRIBUTE_REPARSE_POINT'):
379
+ return bool(path_stat.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT)
380
+
381
+ # Alternative method using Windows API
382
+ try:
383
+ import ctypes
384
+ from ctypes import wintypes
385
+
386
+ # GetFileAttributesW function
387
+ _GetFileAttributesW = ctypes.windll.kernel32.GetFileAttributesW
388
+ _GetFileAttributesW.argtypes = [wintypes.LPCWSTR]
389
+ _GetFileAttributesW.restype = wintypes.DWORD
390
+
391
+ FILE_ATTRIBUTE_REPARSE_POINT = 0x400
392
+ INVALID_FILE_ATTRIBUTES = 0xFFFFFFFF
393
+
394
+ attributes = _GetFileAttributesW(str(path))
395
+ if attributes != INVALID_FILE_ATTRIBUTES:
396
+ return bool(attributes & FILE_ATTRIBUTE_REPARSE_POINT)
397
+
398
+ except (ImportError, AttributeError, OSError):
399
+ pass
400
+
401
+ except Exception:
402
+ # If any error occurs, assume it's not a junction for safety
403
+ pass
404
+
405
+ return False
406
+
407
+ def _has_junction_in_path(self, path: Path) -> bool:
408
+ """
409
+ Check if any parent directory in the path is a junction.
410
+
411
+ Args:
412
+ path: Path to check
413
+
414
+ Returns:
415
+ True if any parent directory is a junction
416
+ """
417
+ try:
418
+ current_path = path.resolve() if path.exists() else path
419
+
420
+ # Check each parent directory
421
+ for parent in current_path.parents:
422
+ if self._is_junction_or_reparse_point(parent):
423
+ return True
424
+
425
+ except Exception:
426
+ # If any error occurs, assume no junctions for safety
427
+ pass
428
+
429
+ return False
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: tree-sitter-analyzer
3
- Version: 1.7.5
3
+ Version: 1.7.7
4
4
  Summary: Extensible multi-language code analyzer framework using Tree-sitter with dynamic plugin architecture
5
5
  Project-URL: Homepage, https://github.com/aimasteracc/tree-sitter-analyzer
6
6
  Project-URL: Documentation, https://github.com/aimasteracc/tree-sitter-analyzer#readme
@@ -34,6 +34,7 @@ Requires-Python: >=3.10
34
34
  Requires-Dist: cachetools>=5.0.0
35
35
  Requires-Dist: chardet>=5.0.0
36
36
  Requires-Dist: mcp>=1.12.3
37
+ Requires-Dist: psutil>=5.9.8
37
38
  Requires-Dist: tree-sitter-cpp<0.25.0,>=0.23.4
38
39
  Requires-Dist: tree-sitter-java<0.25.0,>=0.23.5
39
40
  Requires-Dist: tree-sitter-javascript<0.25.0,>=0.23.1
@@ -123,6 +124,21 @@ Requires-Dist: tree-sitter-typescript<0.25.0,>=0.20.0; extra == 'full'
123
124
  Requires-Dist: types-psutil>=5.9.0; extra == 'full'
124
125
  Provides-Extra: go
125
126
  Requires-Dist: tree-sitter-go<0.25.0,>=0.20.0; extra == 'go'
127
+ Provides-Extra: integration
128
+ Requires-Dist: anyio>=4.0.0; extra == 'integration'
129
+ Requires-Dist: mcp>=1.12.2; extra == 'integration'
130
+ Requires-Dist: memory-profiler>=0.61.0; extra == 'integration'
131
+ Requires-Dist: psutil>=5.9.8; extra == 'integration'
132
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == 'integration'
133
+ Requires-Dist: pytest-benchmark>=4.0.0; extra == 'integration'
134
+ Requires-Dist: pytest-cov>=4.0.0; extra == 'integration'
135
+ Requires-Dist: pytest-mock>=3.14.1; extra == 'integration'
136
+ Requires-Dist: pytest>=8.4.1; extra == 'integration'
137
+ Requires-Dist: tree-sitter-java>=0.23.5; extra == 'integration'
138
+ Requires-Dist: tree-sitter-javascript>=0.23.1; extra == 'integration'
139
+ Requires-Dist: tree-sitter-markdown>=0.3.1; extra == 'integration'
140
+ Requires-Dist: tree-sitter-python>=0.23.0; extra == 'integration'
141
+ Requires-Dist: tree-sitter-typescript>=0.20.0; extra == 'integration'
126
142
  Provides-Extra: java
127
143
  Requires-Dist: tree-sitter-java<0.25.0,>=0.23.5; extra == 'java'
128
144
  Provides-Extra: javascript
@@ -171,7 +187,7 @@ Description-Content-Type: text/markdown
171
187
 
172
188
  [![Python Version](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://python.org)
173
189
  [![License](https://img.shields.io/badge/license-MIT-green.svg)](LICENSE)
174
- [![Tests](https://img.shields.io/badge/tests-2934%20passed-brightgreen.svg)](#quality-assurance)
190
+ [![Tests](https://img.shields.io/badge/tests-3088%20passed-brightgreen.svg)](#quality-assurance)
175
191
  [![Coverage](https://codecov.io/gh/aimasteracc/tree-sitter-analyzer/branch/main/graph/badge.svg)](https://codecov.io/gh/aimasteracc/tree-sitter-analyzer)
176
192
  [![Quality](https://img.shields.io/badge/quality-enterprise%20grade-blue.svg)](#quality-assurance)
177
193
  [![PyPI](https://img.shields.io/pypi/v/tree-sitter-analyzer.svg)](https://pypi.org/project/tree-sitter-analyzer/)
@@ -226,7 +242,7 @@ Tree-sitter Analyzer is an enterprise-grade code analysis tool designed for the
226
242
  | **Go** | Basic Support | Basic syntax parsing |
227
243
 
228
244
  ### 🏆 Production Ready
229
- - **2,934 Tests** - 100% pass rate, enterprise-grade quality assurance
245
+ - **3,088 Tests** - 100% pass rate, enterprise-grade quality assurance
230
246
  - **High Coverage** - Comprehensive test coverage
231
247
  - **Cross-platform Support** - Compatible with Windows, macOS, Linux
232
248
  - **Continuous Maintenance** - Active development and community support
@@ -737,13 +753,13 @@ uv run python -m tree_sitter_analyzer --show-query-languages
737
753
  ## 8. 🏆 Quality Assurance
738
754
 
739
755
  ### 📊 Quality Metrics
740
- - **2,934 tests** - 100% pass rate ✅
756
+ - **3,088 tests** - 100% pass rate ✅
741
757
  - **High code coverage** - Comprehensive test suite
742
758
  - **Zero test failures** - Production ready
743
759
  - **Cross-platform support** - Windows, macOS, Linux
744
760
 
745
761
  ### ⚡ Latest Quality Achievements (v1.7.5)
746
- - ✅ **📊 Enhanced Quality Metrics** - Test count increased to 2,934, coverage maintained at high levels
762
+ - ✅ **📊 Enhanced Quality Metrics** - Test count increased to 3,088 coverage maintained at high levels
747
763
  - ✅ **🔧 System Stability** - All tests passing with enhanced system stability and reliability
748
764
  - ✅ **🆕 Complete Markdown Support** - Added new complete Markdown language plugin supporting all major Markdown elements
749
765
  - ✅ **📝 Enhanced Document Analysis** - Support for intelligent extraction of headers, code blocks, links, images, tables, task lists
@@ -1,12 +1,12 @@
1
- tree_sitter_analyzer/__init__.py,sha256=pwmo3OAgET_ifX0qH_OG7f8YXyg4Xp2DLDEIGR3rs3k,3067
1
+ tree_sitter_analyzer/__init__.py,sha256=xWap73ImPJpNVKE4Ilozpzocq52xSK3vtvVDYU3W2kI,3067
2
2
  tree_sitter_analyzer/__main__.py,sha256=Zl79tpe4UaMu-7yeztc06tgP0CVMRnvGgas4ZQP5SCs,228
3
- tree_sitter_analyzer/api.py,sha256=jzwID6fJNdhQkJP3D0lzBVPhOnGIN4tyyMtmRYdK9zI,22753
3
+ tree_sitter_analyzer/api.py,sha256=BzH-0MmK7qoNvsTAmLovFq3E_50ci0CoozzYXJdn3cQ,22759
4
4
  tree_sitter_analyzer/cli_main.py,sha256=BuaM-L-Jx3G49qvAUOQVsw0wEM-X0UzPaRszRZBist4,10374
5
5
  tree_sitter_analyzer/constants.py,sha256=7w3sLFt_6vPaKsxzrc21K1rOKpLGMyyA1203nu3pDOQ,1889
6
6
  tree_sitter_analyzer/encoding_utils.py,sha256=BgdBKnW20EueEFJT-aLrQI38bTOcR5rWQ3Dpa-ALszA,14805
7
- tree_sitter_analyzer/exceptions.py,sha256=AZryCQyKXekAg8lQZd3zqULnjhCKovBNNpnUlNGDhcI,11615
8
- tree_sitter_analyzer/file_handler.py,sha256=mtWz-DE4yfmak347s0e20xFNy3qddcek58Enom5GlZQ,6689
9
- tree_sitter_analyzer/language_detector.py,sha256=MoGDGqKREBd3GMPJ7QoQCdBmTloWskC5plnjNfDuEO0,12957
7
+ tree_sitter_analyzer/exceptions.py,sha256=XvuUYsB80JYcpfU3Lq0XXpmeV3aWK2tJ_nKNGJtILe4,23116
8
+ tree_sitter_analyzer/file_handler.py,sha256=pyNro47kYoRuQBPkyEo9ileqlQ1A7w890deQltnzMas,7115
9
+ tree_sitter_analyzer/language_detector.py,sha256=9vPer4Lr1tjNR_GMP-OyuLTIldgKszRRddvBVSt1x1E,13353
10
10
  tree_sitter_analyzer/language_loader.py,sha256=sIICLkht_PeVoYV1KTs5bSgOJej9xK9wbyRB2-0Agws,8966
11
11
  tree_sitter_analyzer/models.py,sha256=eZSVTl4s0rnqG21nyCTaJhyhDw1HZkkpMRKCi2QRkL0,20404
12
12
  tree_sitter_analyzer/output_manager.py,sha256=tMEyjGeczqphcLoHdqxgyW8KaG8w6JF-fhsIibNQiCU,8260
@@ -50,7 +50,7 @@ tree_sitter_analyzer/interfaces/__init__.py,sha256=OcT7eNIU0ZXvAeAXbhDqRG3puxn93
50
50
  tree_sitter_analyzer/interfaces/cli.py,sha256=c6CGfF6cgOwgpBimHV1myZ5JfNqil5tCVBOfG5-zijU,17100
51
51
  tree_sitter_analyzer/interfaces/cli_adapter.py,sha256=8j3xL3k6wWrGQCq0KCntqbvSxKy931sT5M96pYhkn9c,11402
52
52
  tree_sitter_analyzer/interfaces/mcp_adapter.py,sha256=iSWcm-bn8_pL6YBu1Rrzherv72-5WUiavColu3uhSAY,7707
53
- tree_sitter_analyzer/interfaces/mcp_server.py,sha256=dUFn1CyO2jLa_y5gGOGE-f0sLGAbjgp738uy5-aAphI,16510
53
+ tree_sitter_analyzer/interfaces/mcp_server.py,sha256=SP0_X4pltjFTH5Mq0NouJbazkYijqjcsNvPegM3WtQE,16542
54
54
  tree_sitter_analyzer/languages/__init__.py,sha256=VTXxJgVjHJAciLhX0zzXOS4EygZMtebeYUbi_0z6fGw,340
55
55
  tree_sitter_analyzer/languages/java_plugin.py,sha256=SEGS-54gF2-kIv8ftYGqq_KNnwPXGw9XnSONlzowHWk,53191
56
56
  tree_sitter_analyzer/languages/javascript_plugin.py,sha256=2O6X5M1ZKQNdWoMmMJXHw5CEk2FxkPjR6wy3iHCyeak,57090
@@ -63,16 +63,16 @@ tree_sitter_analyzer/mcp/resources/__init__.py,sha256=D46ZDhPQaCrQze8dHmijMg1QZQ
63
63
  tree_sitter_analyzer/mcp/resources/code_file_resource.py,sha256=ZX5ZYSJfylBedpL80kTDlco2YZqgRMb5f3OW0VvOVRM,6166
64
64
  tree_sitter_analyzer/mcp/resources/project_stats_resource.py,sha256=YF_LyYwt1uoJx27FvWbVSbIaS5c5RDO-73QL_DfNwTE,20360
65
65
  tree_sitter_analyzer/mcp/tools/__init__.py,sha256=9KfetZTaUhvWTeKuZPYzWb7ZomFQ8SsR1qmXVBT4E7c,739
66
- tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py,sha256=4ls3IuoePMwyYVNFpHznXuv0MZhVEJV0cNKnnz5zTUo,28087
66
+ tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py,sha256=Yb9lpUPGuvRbawmE15icMRW1yn9BVp6kSXb0iZqaTGY,30920
67
67
  tree_sitter_analyzer/mcp/tools/analyze_scale_tool_cli_compatible.py,sha256=mssed7bEfGeGxW4mOf7dg8BDS1oqHLolIBNX9DaZ3DM,8997
68
68
  tree_sitter_analyzer/mcp/tools/base_tool.py,sha256=qf2My325azlnKOugNVMN_R1jtZcjXVy354sGVKzvZls,3546
69
- tree_sitter_analyzer/mcp/tools/fd_rg_utils.py,sha256=R1ICH40vkWO3OdKZjxok9ptQZpZ6-tM5SkLHHOC4-BE,17749
70
- tree_sitter_analyzer/mcp/tools/find_and_grep_tool.py,sha256=uoa0DiX0-M79RYkgq-_HWeJVQUaJpRvE0a1nZLT7Js0,31130
71
- tree_sitter_analyzer/mcp/tools/list_files_tool.py,sha256=atLpBIngGYhmO3hpfujiIz37ng2_Gop1ocEVaV4bs08,17914
72
- tree_sitter_analyzer/mcp/tools/query_tool.py,sha256=yGtivYOdV9AgOd-FkvQY-laiFiw-2TgZgoel6r25xuY,15057
73
- tree_sitter_analyzer/mcp/tools/read_partial_tool.py,sha256=KFmcCJxn1t-IN7Lf9DzbRK2FXytFSFBCPJ9h_mYd9dI,14868
74
- tree_sitter_analyzer/mcp/tools/search_content_tool.py,sha256=WilpA-bOko31aj8lI73luCHoKfTB623vG8tClBowO3Y,32086
75
- tree_sitter_analyzer/mcp/tools/table_format_tool.py,sha256=r7mJMRypBtjcZhKoo7fijt3RW11QbrQy8tWjoMzeWfM,20885
69
+ tree_sitter_analyzer/mcp/tools/fd_rg_utils.py,sha256=9jQ4D5yREs7Nt8L0s-NdVtmaKXJAAOKqAAI8mkW3T2o,18664
70
+ tree_sitter_analyzer/mcp/tools/find_and_grep_tool.py,sha256=9p_Olh6wjuUAVqHfXnsUj3crZtUV4_R6zAfKRqe8jTY,31623
71
+ tree_sitter_analyzer/mcp/tools/list_files_tool.py,sha256=nd6uiOqbsr8NUqNL46d8QnOM-XpMTDOFiaccjBaGLl8,18268
72
+ tree_sitter_analyzer/mcp/tools/query_tool.py,sha256=25k7AHYH4gzhu2XQ5CetBDXDTd_nLdXcEIKmDi6jj04,15367
73
+ tree_sitter_analyzer/mcp/tools/read_partial_tool.py,sha256=lC3Zigp3_v8RU_fi5Fz0O_idjP6z_AmgoqVQOethL3I,18493
74
+ tree_sitter_analyzer/mcp/tools/search_content_tool.py,sha256=wGYjP5OZqqcKnLwF5sdpH3xMuUaxJmwYWU6Rg3R7CAw,32463
75
+ tree_sitter_analyzer/mcp/tools/table_format_tool.py,sha256=gmLqWwFJC63YuyHyMsmCtm-tki1bzeHFvGPIHQTjhig,21398
76
76
  tree_sitter_analyzer/mcp/tools/universal_analyze_tool.py,sha256=-zZnqN9WcoyRTKM_16ADH859LSebzi34BGYwQL2zCOs,25084
77
77
  tree_sitter_analyzer/mcp/utils/__init__.py,sha256=TgTTKsRJAqF95g1fAp5SR_zQVDkImpc_5R0Dw529UUw,3126
78
78
  tree_sitter_analyzer/mcp/utils/error_handler.py,sha256=msrQHX67K3vhJsEc3OPRz5mmWU_yoHz55Lnxy0IZuy4,18404
@@ -92,8 +92,8 @@ tree_sitter_analyzer/queries/typescript.py,sha256=I0bWcCv-sRcZHVsdHdfb1UIRI_G3l0
92
92
  tree_sitter_analyzer/security/__init__.py,sha256=ZTqTt24hsljCpTXAZpJC57L7MU5lJLTf_XnlvEzXwEE,623
93
93
  tree_sitter_analyzer/security/boundary_manager.py,sha256=3eeENRKWtz2pyZHzd8DiVaq8fdeC6s1eVOuBylSmQPg,9347
94
94
  tree_sitter_analyzer/security/regex_checker.py,sha256=jWK6H8PTPgzbwRPfK_RZ8bBTS6rtEbgjY5vr3YWjQ_U,9616
95
- tree_sitter_analyzer/security/validator.py,sha256=yR4qTWEcXpR--bSFwtWvSgY0AzqujOFAqlc1Z7dlTdk,9809
96
- tree_sitter_analyzer-1.7.5.dist-info/METADATA,sha256=KOVhKnK9yXkpDYM_EKNM4tbUYVGkBZBQu0T6A4GKMLY,40685
97
- tree_sitter_analyzer-1.7.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
98
- tree_sitter_analyzer-1.7.5.dist-info/entry_points.txt,sha256=TOW_FpPd7qg0Uq0f44VItnO4HXPdYi7yJUjnQH-0fAM,853
99
- tree_sitter_analyzer-1.7.5.dist-info/RECORD,,
95
+ tree_sitter_analyzer/security/validator.py,sha256=GLsb0TQLzx7mEQ5g-_lLXK8Zo-_r8EM9HtEQCh3vxrc,17037
96
+ tree_sitter_analyzer-1.7.7.dist-info/METADATA,sha256=SEZZeToclVg4umS6VTTw_bNUwP-XC9mSf-MsaxMnlp4,41591
97
+ tree_sitter_analyzer-1.7.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
98
+ tree_sitter_analyzer-1.7.7.dist-info/entry_points.txt,sha256=TOW_FpPd7qg0Uq0f44VItnO4HXPdYi7yJUjnQH-0fAM,853
99
+ tree_sitter_analyzer-1.7.7.dist-info/RECORD,,