tree-sitter-analyzer 1.7.5-py3-none-any.whl → 1.8.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of tree-sitter-analyzer might be problematic.

Files changed (47)
  1. tree_sitter_analyzer/__init__.py +1 -1
  2. tree_sitter_analyzer/api.py +26 -32
  3. tree_sitter_analyzer/cli/argument_validator.py +77 -0
  4. tree_sitter_analyzer/cli/commands/table_command.py +7 -2
  5. tree_sitter_analyzer/cli_main.py +17 -3
  6. tree_sitter_analyzer/core/cache_service.py +15 -5
  7. tree_sitter_analyzer/core/query.py +33 -22
  8. tree_sitter_analyzer/core/query_service.py +179 -154
  9. tree_sitter_analyzer/exceptions.py +334 -0
  10. tree_sitter_analyzer/file_handler.py +16 -1
  11. tree_sitter_analyzer/formatters/formatter_registry.py +355 -0
  12. tree_sitter_analyzer/formatters/html_formatter.py +462 -0
  13. tree_sitter_analyzer/formatters/language_formatter_factory.py +3 -0
  14. tree_sitter_analyzer/formatters/markdown_formatter.py +1 -1
  15. tree_sitter_analyzer/interfaces/mcp_server.py +3 -1
  16. tree_sitter_analyzer/language_detector.py +91 -7
  17. tree_sitter_analyzer/languages/css_plugin.py +390 -0
  18. tree_sitter_analyzer/languages/html_plugin.py +395 -0
  19. tree_sitter_analyzer/languages/java_plugin.py +116 -0
  20. tree_sitter_analyzer/languages/javascript_plugin.py +113 -0
  21. tree_sitter_analyzer/languages/markdown_plugin.py +266 -46
  22. tree_sitter_analyzer/languages/python_plugin.py +176 -33
  23. tree_sitter_analyzer/languages/typescript_plugin.py +130 -1
  24. tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py +68 -3
  25. tree_sitter_analyzer/mcp/tools/fd_rg_utils.py +32 -7
  26. tree_sitter_analyzer/mcp/tools/find_and_grep_tool.py +10 -0
  27. tree_sitter_analyzer/mcp/tools/list_files_tool.py +9 -0
  28. tree_sitter_analyzer/mcp/tools/query_tool.py +100 -52
  29. tree_sitter_analyzer/mcp/tools/read_partial_tool.py +98 -14
  30. tree_sitter_analyzer/mcp/tools/search_content_tool.py +9 -0
  31. tree_sitter_analyzer/mcp/tools/table_format_tool.py +37 -13
  32. tree_sitter_analyzer/models.py +53 -0
  33. tree_sitter_analyzer/output_manager.py +1 -1
  34. tree_sitter_analyzer/plugins/base.py +50 -0
  35. tree_sitter_analyzer/plugins/manager.py +5 -1
  36. tree_sitter_analyzer/queries/css.py +634 -0
  37. tree_sitter_analyzer/queries/html.py +556 -0
  38. tree_sitter_analyzer/queries/markdown.py +54 -164
  39. tree_sitter_analyzer/query_loader.py +16 -3
  40. tree_sitter_analyzer/security/validator.py +343 -46
  41. tree_sitter_analyzer/utils/__init__.py +113 -0
  42. tree_sitter_analyzer/utils/tree_sitter_compat.py +282 -0
  43. tree_sitter_analyzer/utils.py +62 -24
  44. {tree_sitter_analyzer-1.7.5.dist-info → tree_sitter_analyzer-1.8.2.dist-info}/METADATA +136 -14
  45. {tree_sitter_analyzer-1.7.5.dist-info → tree_sitter_analyzer-1.8.2.dist-info}/RECORD +47 -38
  46. {tree_sitter_analyzer-1.7.5.dist-info → tree_sitter_analyzer-1.8.2.dist-info}/entry_points.txt +2 -0
  47. {tree_sitter_analyzer-1.7.5.dist-info → tree_sitter_analyzer-1.8.2.dist-info}/WHEEL +0 -0
@@ -1726,4 +1726,133 @@ class TypeScriptPlugin(LanguagePlugin):
         all_elements.extend(extractor.extract_variables(tree, source_code))
         all_elements.extend(extractor.extract_imports(tree, source_code))
 
-        return all_elements
+        return all_elements
+
+    def execute_query_strategy(self, tree: "tree_sitter.Tree", source_code: str, query_key: str) -> list[CodeElement]:
+        """Execute TypeScript-specific query strategy based on query_key"""
+        if not tree or not source_code:
+            return []
+
+        # Initialize extractor with source code
+        self._extractor.source_code = source_code
+        self._extractor.content_lines = source_code.split("\n")
+        self._extractor._reset_caches()
+        self._extractor._detect_file_characteristics()
+
+        # Map query_key to appropriate extraction method
+        query_mapping = {
+            # Function-related queries
+            "function": lambda: self._extractor.extract_functions(tree, source_code),
+            "async_function": lambda: [f for f in self._extractor.extract_functions(tree, source_code) if getattr(f, 'is_async', False)],
+            "arrow_function": lambda: [f for f in self._extractor.extract_functions(tree, source_code) if getattr(f, 'is_arrow', False)],
+            "method": lambda: [f for f in self._extractor.extract_functions(tree, source_code) if getattr(f, 'is_method', False)],
+            "constructor": lambda: [f for f in self._extractor.extract_functions(tree, source_code) if getattr(f, 'is_constructor', False)],
+            "signature": lambda: [f for f in self._extractor.extract_functions(tree, source_code) if getattr(f, 'is_signature', False)],
+
+            # Class-related queries
+            "class": lambda: self._extractor.extract_classes(tree, source_code),
+            "interface": lambda: [c for c in self._extractor.extract_classes(tree, source_code) if getattr(c, 'class_type', '') == 'interface'],
+            "type_alias": lambda: [c for c in self._extractor.extract_classes(tree, source_code) if getattr(c, 'class_type', '') == 'type'],
+            "enum": lambda: [c for c in self._extractor.extract_classes(tree, source_code) if getattr(c, 'class_type', '') == 'enum'],
+
+            # Variable-related queries
+            "variable": lambda: self._extractor.extract_variables(tree, source_code),
+
+            # Import/Export queries
+            "import": lambda: self._extractor.extract_imports(tree, source_code),
+            "export": lambda: [i for i in self._extractor.extract_imports(tree, source_code) if 'export' in getattr(i, 'raw_text', '')],
+
+            # TypeScript-specific queries
+            "generic": lambda: [c for c in self._extractor.extract_classes(tree, source_code) if 'generics' in getattr(c, 'raw_text', '')],
+            "decorator": lambda: [f for f in self._extractor.extract_functions(tree, source_code) if '@' in getattr(f, 'raw_text', '')],
+
+            # Framework-specific queries
+            "react_component": lambda: [c for c in self._extractor.extract_classes(tree, source_code) if getattr(c, 'is_react_component', False)],
+            "angular_component": lambda: [c for c in self._extractor.extract_classes(tree, source_code) if getattr(c, 'framework_type', '') == 'angular'],
+            "vue_component": lambda: [c for c in self._extractor.extract_classes(tree, source_code) if getattr(c, 'framework_type', '') == 'vue'],
+        }
+
+        # Execute the appropriate extraction method
+        if query_key in query_mapping:
+            try:
+                return query_mapping[query_key]()
+            except Exception as e:
+                log_error(f"Error executing TypeScript query '{query_key}': {e}")
+                return []
+        else:
+            log_warning(f"Unsupported TypeScript query key: {query_key}")
+            return []
+
+    def get_element_categories(self) -> dict[str, list[str]]:
+        """Get TypeScript element categories mapping query_key to node_types"""
+        return {
+            # Function-related categories
+            "function": [
+                "function_declaration",
+                "function_expression",
+                "arrow_function",
+                "generator_function_declaration"
+            ],
+            "async_function": [
+                "function_declaration",
+                "function_expression",
+                "arrow_function",
+                "method_definition"
+            ],
+            "arrow_function": ["arrow_function"],
+            "method": [
+                "method_definition",
+                "method_signature"
+            ],
+            "constructor": ["method_definition"],
+            "signature": ["method_signature"],
+
+            # Class-related categories
+            "class": [
+                "class_declaration",
+                "abstract_class_declaration"
+            ],
+            "interface": ["interface_declaration"],
+            "type_alias": ["type_alias_declaration"],
+            "enum": ["enum_declaration"],
+
+            # Variable-related categories
+            "variable": [
+                "variable_declaration",
+                "lexical_declaration",
+                "property_definition",
+                "property_signature"
+            ],
+
+            # Import/Export categories
+            "import": ["import_statement"],
+            "export": [
+                "export_statement",
+                "export_declaration"
+            ],
+
+            # TypeScript-specific categories
+            "generic": [
+                "type_parameters",
+                "type_parameter"
+            ],
+            "decorator": [
+                "decorator",
+                "decorator_call_expression"
+            ],
+
+            # Framework-specific categories
+            "react_component": [
+                "class_declaration",
+                "function_declaration",
+                "arrow_function"
+            ],
+            "angular_component": [
+                "class_declaration",
+                "decorator"
+            ],
+            "vue_component": [
+                "class_declaration",
+                "function_declaration"
+            ]
+        }
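The new `execute_query_strategy` is a table dispatch: every query key runs a full extraction pass (`extract_functions`, `extract_classes`, or `extract_imports`) and then filters the results by element attributes, using `getattr` with a safe default. A self-contained sketch of the same pattern, with stand-in types rather than the package's real `CodeElement` and extractor:

```python
# Minimal sketch of the dispatch-and-filter pattern; Element and
# extract_functions are stand-ins, not tree_sitter_analyzer APIs.
from dataclasses import dataclass


@dataclass
class Element:
    name: str
    is_async: bool = False


def extract_functions() -> list[Element]:
    return [Element("load", is_async=True), Element("render")]


query_mapping = {
    "function": lambda: extract_functions(),
    # getattr with a default keeps the filter safe for elements that
    # never set the attribute, exactly as in the plugin's lambdas.
    "async_function": lambda: [
        f for f in extract_functions() if getattr(f, "is_async", False)
    ],
}


def execute_query_strategy(query_key: str) -> list[Element]:
    if query_key in query_mapping:
        return query_mapping[query_key]()
    return []  # unsupported keys degrade to an empty list


print([e.name for e in execute_query_strategy("async_function")])  # ['load']
```

One trade-off worth noting: each filtered key re-runs a full extraction pass, so querying several keys against the same tree repeats work unless the extractor caches internally.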
@@ -379,19 +379,27 @@ class AnalyzeScaleTool(BaseMCPTool):
         include_details = arguments.get("include_details", False)
         include_guidance = arguments.get("include_guidance", True)
 
+        # Security validation BEFORE path resolution to catch symlinks
+        is_valid, error_msg = self.security_validator.validate_file_path(file_path)
+        if not is_valid:
+            logger.warning(
+                f"Security validation failed for file path: {file_path} - {error_msg}"
+            )
+            raise ValueError(f"Invalid file path: {error_msg}")
+
         # Resolve file path to absolute path
         resolved_file_path = self.path_resolver.resolve(file_path)
         logger.info(f"Analyzing file: {file_path} (resolved to: {resolved_file_path})")
 
-        # Security validation using resolved path
+        # Additional security validation on resolved path
         is_valid, error_msg = self.security_validator.validate_file_path(
             resolved_file_path
         )
         if not is_valid:
             logger.warning(
-                f"Security validation failed for file path: {resolved_file_path} - {error_msg}"
+                f"Security validation failed for resolved path: {resolved_file_path} - {error_msg}"
             )
-            raise ValueError(f"Invalid file path: {error_msg}")
+            raise ValueError(f"Invalid resolved path: {error_msg}")
 
         # Sanitize inputs
         if language:
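The pattern here is validate, resolve, validate again: the raw argument is checked before `resolve()` so a symlink or traversal sequence cannot be normalized away, and the resolved absolute path is checked once more. A minimal sketch of the same double check with a toy validator (the real `SecurityValidator` rules are richer than these):

```python
from pathlib import Path


def validate(path_str: str) -> tuple[bool, str | None]:
    # Toy rules standing in for SecurityValidator.validate_file_path.
    if ".." in Path(path_str).parts:
        return False, "path traversal detected"
    if Path(path_str).is_symlink():
        return False, "symlinks are not allowed"
    return True, None


def checked_resolve(path_str: str) -> Path:
    ok, err = validate(path_str)  # BEFORE resolution: catches symlinks
    if not ok:
        raise ValueError(f"Invalid file path: {err}")
    resolved = Path(path_str).resolve()
    ok, err = validate(str(resolved))  # AFTER resolution: catches escapes
    if not ok:
        raise ValueError(f"Invalid resolved path: {err}")
    return resolved
```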
@@ -423,6 +431,12 @@ class AnalyzeScaleTool(BaseMCPTool):
         # Calculate basic file metrics
         file_metrics = self._calculate_file_metrics(resolved_file_path)
 
+        # Handle JSON files specially - they don't need structural analysis
+        if language == "json":
+            return self._create_json_file_analysis(
+                resolved_file_path, file_metrics, include_guidance
+            )
+
         # Use appropriate analyzer based on language
         if language == "java":
             # Use AdvancedAnalyzer for comprehensive analysis
@@ -472,6 +486,7 @@ class AnalyzeScaleTool(BaseMCPTool):
 
         # Build enhanced result structure
         result = {
+            "success": True,
             "file_path": file_path,
             "language": language,
             "file_metrics": file_metrics,
@@ -688,6 +703,56 @@ class AnalyzeScaleTool(BaseMCPTool):
 
         return True
 
+    def _create_json_file_analysis(
+        self, file_path: str, file_metrics: dict[str, Any], include_guidance: bool
+    ) -> dict[str, Any]:
+        """
+        Create analysis result for JSON files.
+
+        Args:
+            file_path: Path to the JSON file
+            file_metrics: Basic file metrics
+            include_guidance: Whether to include guidance
+
+        Returns:
+            Analysis result for JSON file
+        """
+        result = {
+            "success": True,
+            "file_path": file_path,
+            "language": "json",
+            "file_size_bytes": file_metrics["file_size_bytes"],
+            "total_lines": file_metrics["total_lines"],
+            "non_empty_lines": file_metrics["total_lines"] - file_metrics["blank_lines"],
+            "estimated_tokens": file_metrics["estimated_tokens"],
+            "complexity_metrics": {
+                "total_elements": 0,
+                "max_depth": 0,
+                "avg_complexity": 0.0,
+            },
+            "structural_overview": {
+                "classes": [],
+                "methods": [],
+                "fields": [],
+            },
+            "scale_category": "small" if file_metrics["total_lines"] < 100 else "medium" if file_metrics["total_lines"] < 1000 else "large",
+            "analysis_recommendations": {
+                "suitable_for_full_analysis": file_metrics["total_lines"] < 1000,
+                "recommended_approach": "JSON files are configuration/data files - structural analysis not applicable",
+                "token_efficiency_notes": "JSON files can be read directly without tree-sitter parsing",
+            },
+        }
+
+        if include_guidance:
+            result["llm_analysis_guidance"] = {
+                "file_characteristics": "JSON configuration/data file",
+                "recommended_workflow": "Direct file reading for content analysis",
+                "token_optimization": "Use simple file reading tools for JSON content",
+                "analysis_focus": "Data structure and configuration values",
+            }
+
+        return result
+
     def get_tool_definition(self) -> dict[str, Any]:
         """
         Get the MCP tool definition for check_code_scale.
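The JSON fast path returns fixed, empty structural metrics and classifies size purely by line count: under 100 lines is "small", under 1000 is "medium", everything else is "large". The chained conditional expression in the diff is equivalent to this small helper (the helper name is illustrative):

```python
def scale_category(total_lines: int) -> str:
    # Same thresholds as the inline conditional in _create_json_file_analysis.
    if total_lines < 100:
        return "small"
    if total_lines < 1000:
        return "medium"
    return "large"


assert scale_category(99) == "small"
assert scale_category(100) == "medium"
assert scale_category(1000) == "large"
```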
@@ -11,6 +11,7 @@ from __future__ import annotations
 import asyncio
 import json
 import os
+import shutil
 import tempfile
 from dataclasses import dataclass
 from pathlib import Path
@@ -27,6 +28,21 @@ DEFAULT_RG_TIMEOUT_MS = 4000
 RG_TIMEOUT_HARD_CAP_MS = 30000
 
 
+def check_external_command(command: str) -> bool:
+    """Check if an external command is available in the system PATH."""
+    return shutil.which(command) is not None
+
+
+def get_missing_commands() -> list[str]:
+    """Get list of missing external commands required by fd/rg tools."""
+    missing = []
+    if not check_external_command("fd"):
+        missing.append("fd")
+    if not check_external_command("rg"):
+        missing.append("rg")
+    return missing
+
+
 def clamp_int(value: int | None, default_value: int, hard_cap: int) -> int:
     if value is None:
         return default_value
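Both helpers are thin wrappers over `shutil.which`, which returns the executable's resolved path, or `None` when the command is not on `PATH`. The same preflight check in isolation:

```python
import shutil

# shutil.which returns the full path to the executable, or None.
for tool in ("fd", "rg"):
    path = shutil.which(tool)
    print(f"{tool}: {path or 'not found in PATH'}")
```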
@@ -64,13 +80,22 @@ async def run_command_capture(
     Returns (returncode, stdout, stderr). On timeout, kills process and returns 124.
     Separated into a util for easy monkeypatching in tests.
     """
-    # Create process
-    proc = await asyncio.create_subprocess_exec(
-        *cmd,
-        stdin=asyncio.subprocess.PIPE if input_data is not None else None,
-        stdout=asyncio.subprocess.PIPE,
-        stderr=asyncio.subprocess.PIPE,
-    )
+    # Check if command exists before attempting to run
+    if cmd and not check_external_command(cmd[0]):
+        error_msg = f"Command '{cmd[0]}' not found in PATH. Please install {cmd[0]} to use this functionality."
+        return 127, b"", error_msg.encode()
+
+    try:
+        # Create process
+        proc = await asyncio.create_subprocess_exec(
+            *cmd,
+            stdin=asyncio.subprocess.PIPE if input_data is not None else None,
+            stdout=asyncio.subprocess.PIPE,
+            stderr=asyncio.subprocess.PIPE,
+        )
+    except FileNotFoundError as e:
+        error_msg = f"Command '{cmd[0]}' not found: {e}"
+        return 127, b"", error_msg.encode()
 
     # Compute timeout seconds
     timeout_s: float | None = None
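Returning 127 for a missing executable mirrors the shell's "command not found" convention, just as the existing timeout path returns 124 (the exit code of `timeout(1)`). The `except FileNotFoundError` branch is a safety net for the race where the binary disappears between the `shutil.which` preflight and the spawn. A standalone sketch of what the wrapped call prevents:

```python
import asyncio


async def spawn_missing() -> int:
    # Without the preflight, a missing binary raises FileNotFoundError;
    # the patched helper converts it into (127, b"", message) instead.
    try:
        proc = await asyncio.create_subprocess_exec(
            "definitely-not-a-real-command",
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        await proc.communicate()
        return proc.returncode or 0
    except FileNotFoundError:
        return 127


print(asyncio.run(spawn_missing()))  # 127
```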
@@ -239,6 +239,16 @@ class FindAndGrepTool(BaseMCPTool):
 
     @handle_mcp_errors("find_and_grep")
     async def execute(self, arguments: dict[str, Any]) -> dict[str, Any]:
+        # Check if both fd and rg commands are available
+        missing_commands = fd_rg_utils.get_missing_commands()
+        if missing_commands:
+            return {
+                "success": False,
+                "error": f"Required commands not found: {', '.join(missing_commands)}. Please install fd (https://github.com/sharkdp/fd) and ripgrep (https://github.com/BurntSushi/ripgrep) to use this tool.",
+                "count": 0,
+                "results": []
+            }
+
         self.validate_arguments(arguments)
         roots = self._validate_roots(arguments["roots"])  # absolute validated
 
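Note the contract: instead of raising, the tool degrades to a structured failure payload with the same top-level keys a success response carries (`success`, `count`, `results`), so MCP clients can branch without exception handling. A sketch of a caller relying on that shape (the payload keys come from the diff; the caller itself is hypothetical):

```python
from typing import Any


def handle_response(response: dict[str, Any]) -> list[Any]:
    # Branch on the "success" flag rather than catching exceptions.
    if not response.get("success", False):
        print(f"tool unavailable: {response.get('error')}")
        return []
    return response["results"]


print(handle_response(
    {"success": False, "error": "fd not found", "count": 0, "results": []}
))
```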
@@ -181,6 +181,15 @@ class ListFilesTool(BaseMCPTool):
 
     @handle_mcp_errors("list_files")
     async def execute(self, arguments: dict[str, Any]) -> dict[str, Any]:
+        # Check if fd command is available
+        if not fd_rg_utils.check_external_command("fd"):
+            return {
+                "success": False,
+                "error": "fd command not found. Please install fd (https://github.com/sharkdp/fd) to use this tool.",
+                "count": 0,
+                "results": []
+            }
+
         self.validate_arguments(arguments)
         roots = self._validate_roots(arguments["roots"])  # normalized absolutes
 
@@ -97,7 +97,6 @@ class QueryTool(BaseMCPTool):
         },
     }
 
-    @handle_mcp_errors("query_code")
     async def execute(self, arguments: dict[str, Any]) -> dict[str, Any]:
         """
         Execute query tool
@@ -108,46 +107,78 @@ class QueryTool(BaseMCPTool):
         Returns:
             Query results
         """
-        # Validate input parameters
-        file_path = arguments.get("file_path")
-        if not file_path:
-            raise ValueError("file_path is required")
+        try:
+            # Validate input parameters - check for empty arguments first
+            if not arguments:
+                from ..utils.error_handler import AnalysisError
+                raise AnalysisError(
+                    "file_path is required",
+                    operation="query_code"
+                )
+
+            file_path = arguments.get("file_path")
+            if not file_path:
+                from ..utils.error_handler import AnalysisError
+                raise AnalysisError(
+                    "file_path is required",
+                    operation="query_code"
+                )
+
+            # Check that either query_key or query_string is provided early
+            query_key = arguments.get("query_key")
+            query_string = arguments.get("query_string")
+
+            if not query_key and not query_string:
+                from ..utils.error_handler import AnalysisError
+                raise AnalysisError(
+                    "Either query_key or query_string must be provided",
+                    operation="query_code"
+                )
+
+            # Security validation BEFORE path resolution to catch symlinks
+            is_valid, error_msg = self.security_validator.validate_file_path(file_path)
+            if not is_valid:
+                return {
+                    "success": False,
+                    "error": f"Invalid or unsafe file path: {error_msg or file_path}"
+                }
 
-        # Resolve file path to absolute path
-        resolved_file_path = self.path_resolver.resolve(file_path)
-        logger.info(f"Querying file: {file_path} (resolved to: {resolved_file_path})")
-
-        # Security validation using resolved path
-        is_valid, error_msg = self.security_validator.validate_file_path(
-            resolved_file_path
-        )
-        if not is_valid:
-            raise ValueError(
-                f"Invalid or unsafe file path: {error_msg or resolved_file_path}"
-            )
+            # Resolve file path to absolute path
+            resolved_file_path = self.path_resolver.resolve(file_path)
+            logger.info(f"Querying file: {file_path} (resolved to: {resolved_file_path})")
 
-        # Get query parameters
-        query_key = arguments.get("query_key")
-        query_string = arguments.get("query_string")
-        filter_expression = arguments.get("filter")
-        output_format = arguments.get("output_format", "json")
-        output_file = arguments.get("output_file")
-        suppress_output = arguments.get("suppress_output", False)
+            # Additional security validation on resolved path
+            is_valid, error_msg = self.security_validator.validate_file_path(
+                resolved_file_path
+            )
+            if not is_valid:
+                return {
+                    "success": False,
+                    "error": f"Invalid or unsafe resolved path: {error_msg or resolved_file_path}"
+                }
 
-        if not query_key and not query_string:
-            raise ValueError("Either query_key or query_string must be provided")
+            # Get query parameters (already validated above)
+            filter_expression = arguments.get("filter")
+            output_format = arguments.get("output_format", "json")
+            output_file = arguments.get("output_file")
+            suppress_output = arguments.get("suppress_output", False)
 
-        if query_key and query_string:
-            raise ValueError("Cannot provide both query_key and query_string")
+            if query_key and query_string:
+                return {
+                    "success": False,
+                    "error": "Cannot provide both query_key and query_string"
+                }
 
-        # Detect language
-        language = arguments.get("language")
-        if not language:
-            language = detect_language_from_file(resolved_file_path)
+            # Detect language
+            language = arguments.get("language")
             if not language:
-                raise ValueError(f"Could not detect language for file: {file_path}")
+                language = detect_language_from_file(resolved_file_path)
+                if not language:
+                    return {
+                        "success": False,
+                        "error": f"Could not detect language for file: {file_path}"
+                    }
 
-        try:
             # Execute query
             results = await self.query_service.execute_query(
                 resolved_file_path, language, query_key, query_string, filter_expression
@@ -163,7 +194,9 @@ class QueryTool(BaseMCPTool):
 
             # Format output
             if output_format == "summary":
-                formatted_result = self._format_summary(results, query_key or "custom", language)
+                formatted_result = self._format_summary(
+                    results, query_key or "custom", language
+                )
             else:
                 formatted_result = {
                     "success": True,
@@ -178,28 +211,31 @@ class QueryTool(BaseMCPTool):
             if output_file:
                 try:
                     import json
-
+
                     # Generate base name from original file path if not provided
                     if not output_file or output_file.strip() == "":
-                        base_name = f"{Path(file_path).stem}_query_{query_key or 'custom'}"
+                        base_name = (
+                            f"{Path(file_path).stem}_query_{query_key or 'custom'}"
+                        )
                     else:
                         base_name = output_file
 
                     # Convert result to JSON string for file output
-                    json_content = json.dumps(formatted_result, indent=2, ensure_ascii=False)
+                    json_content = json.dumps(
+                        formatted_result, indent=2, ensure_ascii=False
+                    )
 
                     # Save to file with automatic extension detection
                     saved_file_path = self.file_output_manager.save_to_file(
-                        content=json_content,
-                        base_name=base_name
+                        content=json_content, base_name=base_name
                     )
-
+
                     # Add file output info to result
                     formatted_result["output_file_path"] = saved_file_path
                     formatted_result["file_saved"] = True
-
+
                     logger.info(f"Query output saved to: {saved_file_path}")
-
+
                 except Exception as e:
                     logger.error(f"Failed to save output to file: {e}")
                     formatted_result["file_save_error"] = str(e)
@@ -215,26 +251,35 @@ class QueryTool(BaseMCPTool):
                     "language": language,
                     "query": query_key or query_string,
                 }
-
+
                 # Include file output info if present
                 if "output_file_path" in formatted_result:
-                    minimal_result["output_file_path"] = formatted_result["output_file_path"]
+                    minimal_result["output_file_path"] = formatted_result[
+                        "output_file_path"
+                    ]
                     minimal_result["file_saved"] = formatted_result["file_saved"]
                 if "file_save_error" in formatted_result:
-                    minimal_result["file_save_error"] = formatted_result["file_save_error"]
+                    minimal_result["file_save_error"] = formatted_result[
+                        "file_save_error"
+                    ]
                     minimal_result["file_saved"] = formatted_result["file_saved"]
-
+
                 return minimal_result
             else:
                 return formatted_result
 
         except Exception as e:
+            from ..utils.error_handler import AnalysisError
+            # Re-raise AnalysisError to maintain proper error handling
+            if isinstance(e, AnalysisError):
+                raise
+
             logger.error(f"Query execution failed: {e}")
             return {
                 "success": False,
                 "error": str(e),
-                "file_path": file_path,
-                "language": language,
+                "file_path": arguments.get("file_path", "unknown"),
+                "language": arguments.get("language", "unknown"),
             }
 
     def _format_summary(
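With the `@handle_mcp_errors` decorator removed, `execute` now owns its error policy: validation failures raise `AnalysisError` (re-raised so upstream handlers still see them), while runtime failures are flattened into an error dict, using `arguments.get(...)` lookups so the fallback works even when `file_path` or `language` was never bound. A condensed sketch of that two-tier policy:

```python
# Condensed sketch of the two-tier error policy; AnalysisError here is a
# stand-in for tree_sitter_analyzer's own exception class.
class AnalysisError(Exception):
    pass


def execute(arguments: dict) -> dict:
    try:
        if not arguments.get("file_path"):
            raise AnalysisError("file_path is required")  # tier 1: re-raised
        raise RuntimeError("parse failed")                # tier 2: flattened
    except Exception as e:
        if isinstance(e, AnalysisError):
            raise  # let upstream MCP error handling see validation errors
        return {
            "success": False,
            "error": str(e),
            "file_path": arguments.get("file_path", "unknown"),
            "language": arguments.get("language", "unknown"),
        }


print(execute({"file_path": "main.py"}))  # flattened runtime failure
```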
@@ -252,7 +297,7 @@ class QueryTool(BaseMCPTool):
             Summary formatted results
         """
         # Group by capture name
-        by_capture = {}
+        by_capture: dict[str, list[dict[str, Any]]] = {}
         for result in results:
             capture_name = result["capture_name"]
             if capture_name not in by_capture:
@@ -260,7 +305,7 @@ class QueryTool(BaseMCPTool):
             by_capture[capture_name].append(result)
 
         # Create summary
-        summary = {
+        summary: dict[str, Any] = {
             "success": True,
             "query_type": query_type,
             "language": language,
@@ -302,6 +347,9 @@ class QueryTool(BaseMCPTool):
 
         # Match common declaration patterns
         patterns = [
+            # Markdown headers
+            r"^#{1,6}\s+(.+)$",  # Markdown headers (# Title, ## Subtitle, etc.)
+            # Programming language patterns
             r"(?:public|private|protected)?\s*(?:static)?\s*(?:class|interface)\s+(\w+)",  # class/interface
             r"(?:public|private|protected)?\s*(?:static)?\s*\w+\s+(\w+)\s*\(",  # method
             r"(\w+)\s*\(",  # simple function call
@@ -310,7 +358,7 @@ class QueryTool(BaseMCPTool):
         for pattern in patterns:
             match = re.search(pattern, first_line)
             if match:
-                return match.group(1)
+                return match.group(1).strip()
 
         return "unnamed"
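The new first pattern lets the name extractor handle Markdown: `^#{1,6}\s+(.+)$` captures the text after one to six `#` characters, and the new `.strip()` removes the trailing whitespace such a greedy capture can keep. A quick check of both behaviors:

```python
import re

patterns = [
    r"^#{1,6}\s+(.+)$",  # Markdown headers, tried before the code patterns
    r"(\w+)\s*\(",       # simple function call
]


def extract_name(first_line: str) -> str:
    for pattern in patterns:
        match = re.search(pattern, first_line)
        if match:
            return match.group(1).strip()
    return "unnamed"


print(extract_name("## Getting Started "))  # 'Getting Started'
print(extract_name("main()"))               # 'main'
print(extract_name("..."))                  # 'unnamed'
```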