tree-sitter-analyzer 1.9.1__py3-none-any.whl → 1.9.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of tree-sitter-analyzer might be problematic.
- tree_sitter_analyzer/__init__.py +1 -1
- tree_sitter_analyzer/api.py +10 -6
- tree_sitter_analyzer/cli/argument_validator.py +1 -1
- tree_sitter_analyzer/cli/commands/advanced_command.py +3 -6
- tree_sitter_analyzer/cli/commands/query_command.py +3 -1
- tree_sitter_analyzer/cli/commands/table_command.py +3 -3
- tree_sitter_analyzer/constants.py +5 -3
- tree_sitter_analyzer/core/analysis_engine.py +1 -1
- tree_sitter_analyzer/core/cache_service.py +1 -1
- tree_sitter_analyzer/core/engine.py +1 -1
- tree_sitter_analyzer/core/query.py +0 -2
- tree_sitter_analyzer/exceptions.py +1 -1
- tree_sitter_analyzer/file_handler.py +6 -6
- tree_sitter_analyzer/formatters/base_formatter.py +1 -1
- tree_sitter_analyzer/formatters/html_formatter.py +24 -14
- tree_sitter_analyzer/formatters/javascript_formatter.py +28 -21
- tree_sitter_analyzer/formatters/language_formatter_factory.py +7 -4
- tree_sitter_analyzer/formatters/markdown_formatter.py +4 -4
- tree_sitter_analyzer/formatters/python_formatter.py +4 -4
- tree_sitter_analyzer/formatters/typescript_formatter.py +1 -1
- tree_sitter_analyzer/interfaces/mcp_adapter.py +4 -2
- tree_sitter_analyzer/interfaces/mcp_server.py +10 -10
- tree_sitter_analyzer/language_detector.py +30 -5
- tree_sitter_analyzer/language_loader.py +46 -26
- tree_sitter_analyzer/languages/css_plugin.py +6 -6
- tree_sitter_analyzer/languages/html_plugin.py +12 -8
- tree_sitter_analyzer/languages/java_plugin.py +307 -520
- tree_sitter_analyzer/languages/javascript_plugin.py +22 -78
- tree_sitter_analyzer/languages/markdown_plugin.py +277 -297
- tree_sitter_analyzer/languages/python_plugin.py +47 -85
- tree_sitter_analyzer/languages/typescript_plugin.py +48 -123
- tree_sitter_analyzer/mcp/resources/project_stats_resource.py +14 -8
- tree_sitter_analyzer/mcp/server.py +38 -23
- tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py +10 -7
- tree_sitter_analyzer/mcp/tools/analyze_scale_tool_cli_compatible.py +51 -7
- tree_sitter_analyzer/mcp/tools/fd_rg_utils.py +15 -2
- tree_sitter_analyzer/mcp/tools/find_and_grep_tool.py +8 -6
- tree_sitter_analyzer/mcp/tools/list_files_tool.py +6 -6
- tree_sitter_analyzer/mcp/tools/search_content_tool.py +48 -19
- tree_sitter_analyzer/mcp/tools/table_format_tool.py +13 -8
- tree_sitter_analyzer/mcp/utils/file_output_manager.py +8 -3
- tree_sitter_analyzer/mcp/utils/gitignore_detector.py +24 -12
- tree_sitter_analyzer/mcp/utils/path_resolver.py +2 -2
- tree_sitter_analyzer/models.py +16 -0
- tree_sitter_analyzer/mypy_current_errors.txt +2 -0
- tree_sitter_analyzer/plugins/base.py +66 -0
- tree_sitter_analyzer/queries/java.py +1 -1
- tree_sitter_analyzer/queries/javascript.py +3 -8
- tree_sitter_analyzer/queries/markdown.py +1 -1
- tree_sitter_analyzer/queries/python.py +2 -2
- tree_sitter_analyzer/security/boundary_manager.py +2 -5
- tree_sitter_analyzer/security/regex_checker.py +2 -2
- tree_sitter_analyzer/security/validator.py +5 -1
- tree_sitter_analyzer/table_formatter.py +4 -4
- tree_sitter_analyzer/utils/__init__.py +27 -116
- tree_sitter_analyzer/{utils.py → utils/logging.py} +2 -2
- tree_sitter_analyzer/utils/tree_sitter_compat.py +2 -2
- {tree_sitter_analyzer-1.9.1.dist-info → tree_sitter_analyzer-1.9.3.dist-info}/METADATA +70 -30
- tree_sitter_analyzer-1.9.3.dist-info/RECORD +110 -0
- tree_sitter_analyzer-1.9.1.dist-info/RECORD +0 -109
- {tree_sitter_analyzer-1.9.1.dist-info → tree_sitter_analyzer-1.9.3.dist-info}/WHEEL +0 -0
- {tree_sitter_analyzer-1.9.1.dist-info → tree_sitter_analyzer-1.9.3.dist-info}/entry_points.txt +0 -0
tree_sitter_analyzer/mcp/server.py (CHANGED)

@@ -25,23 +25,23 @@ except ImportError:
     MCP_AVAILABLE = False

     # Fallback types for development without MCP
-    class Server:
+    class Server:  # type: ignore
        pass

-    class InitializationOptions:
+    class InitializationOptions:  # type: ignore
        def __init__(self, **kwargs: Any) -> None:
            pass

-    class Tool:
+    class Tool:  # type: ignore
        pass

-    class Resource:
+    class Resource:  # type: ignore
        pass

-    class TextContent:
+    class TextContent:  # type: ignore
        pass

-    def stdio_server() -> None:
+    def stdio_server() -> None:  # type: ignore[misc]
        pass

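The `# type: ignore` comments added to these fallback stubs silence mypy's complaint about conditionally redefining names that the `try` branch imported. A minimal, runnable sketch of the pattern (the `mcp.server` import path follows the MCP SDK, but treat the details as illustrative):

```python
try:
    from mcp.server import Server  # real class when the MCP SDK is installed

    MCP_AVAILABLE = True
except ImportError:
    MCP_AVAILABLE = False

    class Server:  # type: ignore  # stub keeps the module importable without MCP
        pass
```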
@@ -70,8 +70,11 @@ from .tools.table_format_tool import TableFormatTool
 # Import UniversalAnalyzeTool at module level for test compatibility
 try:
     from .tools.universal_analyze_tool import UniversalAnalyzeTool
+
+    UNIVERSAL_TOOL_AVAILABLE = True
 except ImportError:
-    UniversalAnalyzeTool
+    UniversalAnalyzeTool = None  # type: ignore
+    UNIVERSAL_TOOL_AVAILABLE = False

 # Set up logging
 logger = setup_logger(__name__)
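The 1.9.1 `except ImportError` branch held the bare expression `UniversalAnalyzeTool`, which would itself raise `NameError` exactly when the import had failed; 1.9.3 assigns a `None` sentinel plus an explicit availability flag. A runnable sketch of the same pattern, using `tomllib` (absent before Python 3.11) as a stand-in for the optional module:

```python
try:
    import tomllib  # stand-in for the optional dependency

    TOML_AVAILABLE = True
except ImportError:
    tomllib = None  # type: ignore  # sentinel: feature unavailable
    TOML_AVAILABLE = False

# Callers gate on both the flag and the sentinel, as the server now does:
if TOML_AVAILABLE and tomllib is not None:
    print(tomllib.loads("x = 1")["x"])
else:
    print("optional dependency missing; feature disabled")
```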
@@ -112,13 +115,15 @@ class TreeSitterAnalyzerMCPServer:

         # Optional universal tool to satisfy initialization tests
         # Allow tests to control initialization by checking if UniversalAnalyzeTool is available
-        if UniversalAnalyzeTool is not None:
+        if UNIVERSAL_TOOL_AVAILABLE and UniversalAnalyzeTool is not None:
             try:
-                self.universal_analyze_tool =
+                self.universal_analyze_tool: UniversalAnalyzeTool | None = (
+                    UniversalAnalyzeTool(project_root)
+                )
             except Exception:
-                self.universal_analyze_tool
+                self.universal_analyze_tool = None
         else:
-            self.universal_analyze_tool
+            self.universal_analyze_tool = None

         # Initialize MCP resources
         self.code_file_resource = CodeFileResource()
@@ -162,9 +167,11 @@ class TreeSitterAnalyzerMCPServer:

         # For specific initialization tests we allow delegating to universal tool
         if "file_path" not in arguments:
-
+            universal_tool = getattr(self, "universal_analyze_tool", None)
+            if universal_tool is not None:
                 try:
-
+                    result = await universal_tool.execute(arguments)
+                    return dict(result)  # Ensure proper type casting
                 except ValueError:
                     # Re-raise ValueError as-is for test compatibility
                     raise
@@ -338,8 +345,9 @@ class TreeSitterAnalyzerMCPServer:
             Dictionary containing file metrics
         """
         try:
-
-
+            from ..encoding_utils import read_file_safe
+
+            content, _ = read_file_safe(file_path)

             lines = content.split("\n")
             total_lines = len(lines)
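This release repeatedly swaps ad-hoc file reads for `encoding_utils.read_file_safe` and writes for `write_file_safe`. The call sites (`content, _ = read_file_safe(file_path)`) imply the reader returns a `(content, encoding)` pair. The actual implementation is not part of this diff; a hedged sketch of what such a helper typically looks like:

```python
from pathlib import Path


def read_file_safe(file_path: str | Path) -> tuple[str, str]:
    """Decode a file, trying common encodings; return (content, encoding used).

    Sketch only: tree_sitter_analyzer.encoding_utils may differ in detail.
    """
    raw = Path(file_path).read_bytes()
    for encoding in ("utf-8", "utf-8-sig", "latin-1"):
        try:
            return raw.decode(encoding), encoding
        except UnicodeDecodeError:
            continue
    # Unreachable in practice: latin-1 decodes any byte sequence.
    return raw.decode("utf-8", errors="replace"), "utf-8"
```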
@@ -400,10 +408,6 @@ class TreeSitterAnalyzerMCPServer:
                 if "-->" not in stripped:
                     in_multiline_comment = True
                     continue
-                elif in_multiline_comment and "-->" in stripped:
-                    comment_lines += 1
-                    in_multiline_comment = False
-                    continue

                 # If not a comment, it's code
                 code_lines += 1
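The removed `elif` handled the line that closes an HTML multi-line comment, a case presumably now covered earlier in the loop. For orientation, a self-contained sketch of the kind of comment/code line classifier this metrics method implements (illustrative, not the package's exact logic):

```python
def count_html_lines(lines: list[str]) -> tuple[int, int]:
    """Classify stripped lines as comment or code; returns (comment_lines, code_lines)."""
    comment_lines = 0
    code_lines = 0
    in_multiline_comment = False
    for line in lines:
        stripped = line.strip()
        if not stripped:
            continue
        if in_multiline_comment:
            comment_lines += 1
            if "-->" in stripped:  # closing line of a <!-- ... --> block
                in_multiline_comment = False
            continue
        if stripped.startswith("<!--"):
            comment_lines += 1
            if "-->" not in stripped:  # comment continues onto later lines
                in_multiline_comment = True
            continue
        code_lines += 1
    return comment_lines, code_lines
```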
@@ -444,7 +448,7 @@ class TreeSitterAnalyzerMCPServer:
         server: Server = Server(self.name)

         # Register tools using @server decorators (standard MCP pattern)
-        @server.list_tools()
+        @server.list_tools()  # type: ignore[misc]
         async def handle_list_tools() -> list[Tool]:
             """List all available tools."""
             logger.info("Client requesting tools list")
@@ -477,7 +481,7 @@ class TreeSitterAnalyzerMCPServer:
             logger.info(f"Returning {len(tools)} tools: {[t.name for t in tools]}")
             return tools

-        @server.call_tool()
+        @server.call_tool()  # type: ignore[misc]
         async def handle_call_tool(
             name: str, arguments: dict[str, Any]
         ) -> list[TextContent]:
@@ -634,9 +638,10 @@ class TreeSitterAnalyzerMCPServer:
                     pass  # Silently ignore logging errors during shutdown
                 raise

+        # Some clients may request prompts; explicitly return empty list
         # Some clients may request prompts; explicitly return empty list
         try:
-            from mcp.types import Prompt
+            from mcp.types import Prompt

             @server.list_prompts()  # type: ignore
             async def handle_list_prompts() -> list[Prompt]:
@@ -701,10 +706,20 @@ class TreeSitterAnalyzerMCPServer:
         server = self.create_server()

         # Initialize server options with required capabilities field
+        from mcp.server.models import ServerCapabilities
+        from mcp.types import ToolsCapability, ResourcesCapability, PromptsCapability, LoggingCapability
+
+        capabilities = ServerCapabilities(
+            tools=ToolsCapability(listChanged=True),
+            resources=ResourcesCapability(subscribe=True, listChanged=True),
+            prompts=PromptsCapability(listChanged=True),
+            logging=LoggingCapability()
+        )
+
         options = InitializationOptions(
             server_name=self.name,
             server_version=self.version,
-            capabilities=
+            capabilities=capabilities,
         )

         try:
tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py (CHANGED)

@@ -65,8 +65,9 @@ class AnalyzeScaleTool(BaseMCPTool):
             Dictionary containing file metrics
         """
         try:
-
-
+            from ...encoding_utils import read_file_safe
+
+            content, _ = read_file_safe(file_path)

             lines = content.split("\n")
             total_lines = len(lines)
@@ -736,11 +737,13 @@ class AnalyzeScaleTool(BaseMCPTool):
                 "methods": [],
                 "fields": [],
             },
-            "scale_category":
-
-
-
-
+            "scale_category": (
+                "small"
+                if file_metrics["total_lines"] < 100
+                else "medium"
+                if file_metrics["total_lines"] < 1000
+                else "large"
+            ),
             "analysis_recommendations": {
                 "suitable_for_full_analysis": file_metrics["total_lines"] < 1000,
                 "recommended_approach": "JSON files are configuration/data files - structural analysis not applicable",
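The reconstructed value is a chained conditional expression; wrapped in parentheses, a formatter such as Black lays it out one clause per line, and it evaluates top to bottom. Equivalent standalone form for reference:

```python
def scale_category(total_lines: int) -> str:
    # First true condition wins: <100 small, <1000 medium, else large.
    return (
        "small"
        if total_lines < 100
        else "medium"
        if total_lines < 1000
        else "large"
    )


assert scale_category(50) == "small"
assert scale_category(500) == "medium"
assert scale_category(5000) == "large"
```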
tree_sitter_analyzer/mcp/tools/analyze_scale_tool_cli_compatible.py (CHANGED)

@@ -137,19 +137,63 @@ class AnalyzeScaleToolCLICompatible:
                 else None
             ),
             "element_counts": {
-                "imports": len(
-
-
-
-
+                "imports": len(
+                    [
+                        e
+                        for e in analysis_result.elements
+                        if getattr(e, "element_type", "") == "import"
+                    ]
+                ),
+                "classes": len(
+                    [
+                        e
+                        for e in analysis_result.elements
+                        if getattr(e, "element_type", "") == "class"
+                    ]
+                ),
+                "methods": len(
+                    [
+                        e
+                        for e in analysis_result.elements
+                        if getattr(e, "element_type", "") == "function"
+                    ]
+                ),
+                "fields": len(
+                    [
+                        e
+                        for e in analysis_result.elements
+                        if getattr(e, "element_type", "") == "variable"
+                    ]
+                ),
+                "annotations": len(
+                    [
+                        e
+                        for e in analysis_result.elements
+                        if getattr(e, "element_type", "") == "annotation"
+                    ]
+                ),
             },
             "analysis_time_ms": analysis_time_ms,
             "error_message": None,
         }

+        classes_count = len(
+            [
+                e
+                for e in analysis_result.elements
+                if getattr(e, "element_type", "") == "class"
+            ]
+        )
+        methods_count = len(
+            [
+                e
+                for e in analysis_result.elements
+                if getattr(e, "element_type", "") == "function"
+            ]
+        )
         logger.info(
-            f"Successfully analyzed {file_path}: {
-            f"{
+            f"Successfully analyzed {file_path}: {classes_count} classes, "
+            f"{methods_count} methods, {analysis_time_ms}ms"
         )

         return result
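The expanded `element_counts` block walks `analysis_result.elements` five times, and the new log line walks it twice more. A single pass with `collections.Counter` would be an equivalent refactor, assuming `element_type` is a plain string attribute as the `getattr` calls suggest:

```python
from collections import Counter
from dataclasses import dataclass


@dataclass
class _Elem:  # hypothetical stand-in for a parsed element
    element_type: str


elements = [_Elem("class"), _Elem("function"), _Elem("function"), _Elem("import")]
counts = Counter(getattr(e, "element_type", "") for e in elements)

element_counts = {
    "imports": counts["import"],
    "classes": counts["class"],
    "methods": counts["function"],
    "fields": counts["variable"],
    "annotations": counts["annotation"],
}
assert element_counts["methods"] == 2 and element_counts["fields"] == 0
```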
tree_sitter_analyzer/mcp/tools/fd_rg_utils.py (CHANGED)

@@ -397,7 +397,13 @@ def group_matches_by_file(matches: list[dict[str, Any]]) -> dict[str, Any]:
     # Convert to grouped structure
     files = []
     for file_path, file_matches in file_groups.items():
-        files.append(
+        files.append(
+            {
+                "file": file_path,
+                "matches": file_matches,
+                "match_count": len(file_matches),
+            }
+        )

     return {"success": True, "count": total_matches, "files": files}
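The restored `files.append(...)` body pins down the grouped output shape. A condensed, runnable sketch of the whole grouping step (inner match keys such as `"line"` are illustrative):

```python
from collections import defaultdict
from typing import Any


def group_matches_by_file(matches: list[dict[str, Any]]) -> dict[str, Any]:
    """Sketch of the grouping shown in the hunk; key names follow the diff."""
    file_groups: dict[str, list[dict[str, Any]]] = defaultdict(list)
    for match in matches:
        file_groups[match["file"]].append(match)

    files = [
        {"file": path, "matches": group, "match_count": len(group)}
        for path, group in file_groups.items()
    ]
    return {"success": True, "count": len(matches), "files": files}


grouped = group_matches_by_file([{"file": "a.py", "line": 3}, {"file": "a.py", "line": 9}])
assert grouped["files"][0]["match_count"] == 2
```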
@@ -516,6 +522,11 @@ def summarize_search_results(
                 sample_lines.append(f"L{line_num}: {truncated_line}")
                 remaining_lines -= 1

+        # Ensure we have at least some sample lines if matches exist
+        if not sample_lines and file_matches:
+            # Fallback: create a simple summary line
+            sample_lines.append(f"Found {len(file_matches)} matches")
+
         # Optimize file path for token efficiency
         optimized_path = _optimize_file_path(file_path, common_prefix)
@@ -628,7 +639,9 @@ def write_files_to_temp(files: list[str]) -> TempFileList:
     fd, temp_path = tempfile.mkstemp(prefix="rg-files-", suffix=".lst")
     os.close(fd)
     content = "\n".join(files)
-
+    from ...encoding_utils import write_file_safe
+
+    write_file_safe(temp_path, content)
     return TempFileList(path=temp_path)
tree_sitter_analyzer/mcp/tools/find_and_grep_tool.py (CHANGED)

@@ -249,7 +249,7 @@ class FindAndGrepTool(BaseMCPTool):
         return True

     @handle_mcp_errors("find_and_grep")
-    async def execute(self, arguments: dict[str, Any]) -> dict[str, Any]:
+    async def execute(self, arguments: dict[str, Any]) -> dict[str, Any] | int:
         # Check if both fd and rg commands are available
         missing_commands = fd_rg_utils.get_missing_commands()
         if missing_commands:
@@ -341,14 +341,14 @@ class FindAndGrepTool(BaseMCPTool):
             files.sort()
         elif sort_mode == "mtime":

-            def get_mtime(p):
+            def get_mtime(p: str) -> float:
                 path_obj = pathlib.Path(p)
                 return path_obj.stat().st_mtime if path_obj.exists() else 0

             files.sort(key=get_mtime, reverse=True)
         elif sort_mode == "size":

-            def get_size(p):
+            def get_size(p: str) -> int:
                 path_obj = pathlib.Path(p)
                 return path_obj.stat().st_size if path_obj.exists() else 0

@@ -628,9 +628,11 @@ class FindAndGrepTool(BaseMCPTool):
             "success": True,
             "results": matches,
             "count": len(matches),
-            "files":
-
-
+            "files": (
+                fd_rg_utils.group_matches_by_file(matches)["files"]
+                if matches
+                else []
+            ),
             "summary": fd_rg_utils.summarize_search_results(matches),
             "meta": result["meta"],
         }
tree_sitter_analyzer/mcp/tools/list_files_tool.py (CHANGED)

@@ -301,7 +301,7 @@ class ListFilesTool(BaseMCPTool):
                     saved_path = file_manager.save_to_file(
                         content=json_content, base_name=output_file
                     )
-                    result["output_file"] = saved_path
+                    result["output_file"] = saved_path  # type: ignore[assignment]

                     if suppress_output:
                         # Return minimal response to save tokens
@@ -314,7 +314,7 @@ class ListFilesTool(BaseMCPTool):
                     }
                 except Exception as e:
                     logger.warning(f"Failed to save output file: {e}")
-                    result["output_file_error"] = str(e)
+                    result["output_file_error"] = str(e)  # type: ignore[assignment]

             return result

@@ -350,7 +350,7 @@ class ListFilesTool(BaseMCPTool):
             except (OSError, ValueError):  # nosec B112
                 continue

-
+        final_result: dict[str, Any] = {
             "success": True,
             "count": len(results),
             "truncated": truncated,
@@ -396,7 +396,7 @@ class ListFilesTool(BaseMCPTool):
                     saved_path = file_manager.save_to_file(
                         content=json_content, base_name=output_file
                     )
-
+                    final_result["output_file"] = saved_path

                     if suppress_output:
                         # Return minimal response to save tokens
@@ -408,6 +408,6 @@ class ListFilesTool(BaseMCPTool):
                     }
                 except Exception as e:
                     logger.warning(f"Failed to save output file: {e}")
-
+                    final_result["output_file_error"] = str(e)

-        return
+        return final_result
tree_sitter_analyzer/mcp/tools/search_content_tool.py (CHANGED)

@@ -341,21 +341,50 @@ class SearchContentTool(BaseMCPTool):
         # Simple cache lookup without complex cross-format logic for performance
         cached_result = self.cache.get(cache_key)
         if cached_result is not None:
-            #
-
-
-
-            return
-
-
-
+            # Check if this is a total_only request
+            total_only_requested = arguments.get("total_only", False)
+
+            if total_only_requested:
+                # For total_only mode, always return integer if possible
+                if isinstance(cached_result, int):
+                    return cached_result
+                elif (
+                    isinstance(cached_result, dict)
+                    and "total_matches" in cached_result
+                ):
+                    total_matches = cached_result["total_matches"]
+                    return (
+                        int(total_matches)
+                        if isinstance(total_matches, (int, float))
+                        else 0
+                    )
+                elif isinstance(cached_result, dict) and "count" in cached_result:
+                    count = cached_result["count"]
+                    return int(count) if isinstance(count, (int, float)) else 0
+                else:
+                    # Fallback: extract count from dict or return 0
+                    return 0
             else:
-                # For
-
-
-
-
-
+                # For non-total_only modes, return dict format
+                if isinstance(cached_result, dict):
+                    cached_result = cached_result.copy()
+                    cached_result["cache_hit"] = True
+                    return cached_result
+                elif isinstance(cached_result, int):
+                    # Convert integer to dict format for non-total_only modes
+                    return {
+                        "success": True,
+                        "count": cached_result,
+                        "total_matches": cached_result,
+                        "cache_hit": True,
+                    }
+                else:
+                    # For other types, convert to dict format
+                    return {
+                        "success": True,
+                        "cached_result": cached_result,
+                        "cache_hit": True,
+                    }

         # Handle max_count parameter properly
         # If user specifies max_count, use it directly (with reasonable upper limit)
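The rebuilt cache branch normalizes whatever was stored (an `int` total or a full result `dict`) to the shape the current call expects. The same logic, factored as a standalone helper for illustration (not the package's actual API):

```python
from typing import Any


def normalize_cached(cached: Any, total_only: bool) -> int | dict[str, Any]:
    """Coerce a cached search result to an int (total_only) or a dict (otherwise)."""
    if total_only:
        if isinstance(cached, int):
            return cached
        if isinstance(cached, dict):
            value = cached.get("total_matches", cached.get("count", 0))
            return int(value) if isinstance(value, (int, float)) else 0
        return 0
    if isinstance(cached, dict):
        result = cached.copy()
        result["cache_hit"] = True
        return result
    if isinstance(cached, int):
        return {"success": True, "count": cached, "total_matches": cached, "cache_hit": True}
    return {"success": True, "cached_result": cached, "cache_hit": True}


assert normalize_cached({"total_matches": 7}, total_only=True) == 7
assert normalize_cached(7, total_only=False)["count"] == 7
```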
@@ -731,11 +760,11 @@ class SearchContentTool(BaseMCPTool):
                 "elapsed_ms": elapsed_ms,
                 "results": matches,
                 "summary": fd_rg_utils.summarize_search_results(matches),
-                "grouped_by_file":
-                "files"
-
-
-
+                "grouped_by_file": (
+                    fd_rg_utils.group_matches_by_file(matches)["files"]
+                    if matches
+                    else []
+                ),
             }

             # Convert to JSON for file output
tree_sitter_analyzer/mcp/tools/table_format_tool.py (CHANGED)

@@ -374,8 +374,9 @@ class TableFormatTool(BaseMCPTool):

         # Write content to file
         try:
-
-
+            from ...encoding_utils import write_file_safe
+
+            write_file_safe(output_path, content)
             self.logger.info(f"Output written to file: {output_path}")
             return output_path
         except Exception as e:
@@ -471,19 +472,23 @@ class TableFormatTool(BaseMCPTool):
             try:
                 if FormatterRegistry.is_format_supported(format_type):
                     # Use new FormatterRegistry
-
-
+                    registry_formatter = FormatterRegistry.get_formatter(
+                        format_type
+                    )
+                    table_output = registry_formatter.format(
+                        structure_result.elements
+                    )
                 else:
                     # Fallback to legacy TableFormatter for backward compatibility
-
-                    table_output =
+                    legacy_formatter: Any = TableFormatter(format_type)
+                    table_output = legacy_formatter.format_structure(structure_dict)
             except Exception as e:
                 # If FormatterRegistry fails, fallback to legacy TableFormatter
                 logger.warning(
                     f"FormatterRegistry failed, using legacy formatter: {e}"
                 )
-
-                table_output =
+                fallback_formatter: Any = TableFormatter(format_type)
+                table_output = fallback_formatter.format_structure(structure_dict)

             # Ensure output format matches CLI exactly
             # Fix line ending differences: normalize to Unix-style LF (\n)
tree_sitter_analyzer/mcp/utils/file_output_manager.py (CHANGED)

@@ -36,7 +36,7 @@ class FileOutputManager:
             project_root: Optional project root directory for fallback output path
         """
         self.project_root = project_root
-        self._output_path = None
+        self._output_path: str | None = None
         self._initialize_output_path()

     @classmethod
@@ -252,6 +252,10 @@ class FileOutputManager:
             output_file = output_path / filename
         else:
             # Generate filename with appropriate extension
+            if base_name is None:
+                raise ValueError(
+                    "base_name cannot be None when filename is not provided"
+                )
             generated_filename = self.generate_output_filename(base_name, content)
             output_file = output_path / generated_filename

@@ -260,8 +264,9 @@ class FileOutputManager:

         # Write content to file
         try:
-
-
+            from ...encoding_utils import write_file_safe
+
+            write_file_safe(output_file, content)

             logger.info(f"Content saved to file: {output_file}")
             return str(output_file)
tree_sitter_analyzer/mcp/utils/gitignore_detector.py (CHANGED)

@@ -16,7 +16,7 @@ logger = logging.getLogger(__name__)
 class GitignoreDetector:
     """Detects .gitignore interference with file searches"""

-    def __init__(self):
+    def __init__(self) -> None:
         self.common_ignore_patterns = {
             # Directory patterns that commonly cause search issues
             "build/*",
@@ -116,8 +116,10 @@ class GitignoreDetector:
             current_search_dir: Directory where the search is being performed
         """
         try:
-
-
+            from ...encoding_utils import read_file_safe
+
+            content, _ = read_file_safe(gitignore_file)
+            lines = content.splitlines()

             for line in lines:
                 line = line.strip()
@@ -257,14 +259,14 @@ class GitignoreDetector:

     def get_detection_info(
         self, roots: list[str], project_root: str | None = None
-    ) -> dict:
+    ) -> dict[str, object]:
         """
         Get detailed information about gitignore detection

         Returns:
             Dictionary with detection details for debugging/logging
         """
-        info = {
+        info: dict[str, object] = {
             "should_use_no_ignore": False,
             "detected_gitignore_files": [],
             "interfering_patterns": [],
@@ -294,13 +296,21 @@ class GitignoreDetector:
                     gitignore_file, gitignore_dir, project_path
                 )
                 if patterns:
-                    info
-
-
+                    existing_patterns = info.get("interfering_patterns", [])
+                    if isinstance(existing_patterns, list):
+                        info["interfering_patterns"] = existing_patterns + patterns
+                    else:
+                        info["interfering_patterns"] = patterns
+
+            interfering_patterns = info.get("interfering_patterns", [])
+            if interfering_patterns:
                 info["should_use_no_ignore"] = True
-
-
+                pattern_count = (
+                    len(interfering_patterns)
+                    if isinstance(interfering_patterns, list)
+                    else 0
                 )
+                info["reason"] = f"Found {pattern_count} interfering patterns"

         except Exception as e:
             info["reason"] = f"Error during detection: {e}"
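The `isinstance` dances above are the price of widening `info` to `dict[str, object]`: every read comes back as `object`. A `TypedDict` would give mypy per-key types instead; a sketch of that alternative (not what the package chose):

```python
from typing import TypedDict


class DetectionInfo(TypedDict):
    should_use_no_ignore: bool
    detected_gitignore_files: list[str]
    interfering_patterns: list[str]
    reason: str


info: DetectionInfo = {
    "should_use_no_ignore": False,
    "detected_gitignore_files": [],
    "interfering_patterns": [],
    "reason": "",
}
info["interfering_patterns"].append("build/*")  # typed list[str]; no isinstance needed
info["should_use_no_ignore"] = bool(info["interfering_patterns"])
info["reason"] = f"Found {len(info['interfering_patterns'])} interfering patterns"
```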
@@ -314,8 +324,10 @@ class GitignoreDetector:
         interfering = []

         try:
-
-
+            from ...encoding_utils import read_file_safe
+
+            content, _ = read_file_safe(gitignore_file)
+            lines = content.splitlines()

             for line in lines:
                 line = line.strip()
tree_sitter_analyzer/mcp/utils/path_resolver.py (CHANGED)

@@ -52,7 +52,7 @@ def _normalize_path_cross_platform(path_str: str) -> str:
             from ctypes import wintypes

             # GetLongPathNameW function
-            _GetLongPathNameW = ctypes.windll.kernel32.GetLongPathNameW
+            _GetLongPathNameW = ctypes.windll.kernel32.GetLongPathNameW
             _GetLongPathNameW.argtypes = [
                 wintypes.LPCWSTR,
                 wintypes.LPWSTR,
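For context, `GetLongPathNameW` expands Windows 8.3 short names (for example `PROGRA~1` to `Program Files`), which is why the resolver declares its `argtypes`. A minimal Windows-only sketch of the call this code sets up:

```python
import ctypes
from ctypes import wintypes


def long_path(short: str) -> str:
    """Expand an 8.3 short path via the Win32 API; returns the input on failure."""
    get_long = ctypes.windll.kernel32.GetLongPathNameW  # raises off Windows
    get_long.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD]
    get_long.restype = wintypes.DWORD

    buf = ctypes.create_unicode_buffer(260)  # MAX_PATH
    n = get_long(short, buf, len(buf))
    return buf.value if 0 < n < len(buf) else short
```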
@@ -110,7 +110,7 @@ class PathResolver:
             project_root: Optional project root directory for resolving relative paths
         """
         self.project_root = None
-        self._cache = {}  # Simple cache for resolved paths
+        self._cache: dict[str, str] = {}  # Simple cache for resolved paths
         self._cache_size_limit = 100  # Limit cache size to prevent memory issues

         if project_root:
tree_sitter_analyzer/models.py (CHANGED)
@@ -45,6 +45,14 @@ class CodeElement(ABC):
     raw_text: str = ""
     language: str = "unknown"
     docstring: str | None = None  # JavaDoc/docstring for this element
+    element_type: str = "unknown"
+
+    def to_summary_item(self) -> dict[str, Any]:
+        return {
+            "name": self.name,
+            "type": self.element_type,
+            "lines": {"start": self.start_line, "end": self.end_line},
+        }


 @dataclass(frozen=False)
@@ -341,6 +349,14 @@ class JavaPackage:
     start_line: int = 0
     end_line: int = 0

+    def to_summary_item(self) -> dict[str, Any]:
+        """Return dictionary for summary item"""
+        return {
+            "name": self.name,
+            "type": "package",
+            "lines": {"start": self.start_line, "end": self.end_line},
+        }
+

 @dataclass(frozen=False)
 class AnalysisResult: