tree-sitter-analyzer 1.1.3__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tree-sitter-analyzer might be problematic.
- tree_sitter_analyzer/cli/commands/advanced_command.py +145 -6
- tree_sitter_analyzer/cli/commands/structure_command.py +23 -5
- tree_sitter_analyzer/cli/commands/summary_command.py +19 -4
- tree_sitter_analyzer/cli/commands/table_command.py +14 -6
- tree_sitter_analyzer/constants.py +68 -0
- tree_sitter_analyzer/core/analysis_engine.py +0 -5
- tree_sitter_analyzer/core/engine.py +0 -12
- tree_sitter_analyzer/interfaces/cli_adapter.py +27 -12
- tree_sitter_analyzer/interfaces/mcp_adapter.py +31 -15
- tree_sitter_analyzer/mcp/server.py +187 -35
- tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py +42 -19
- tree_sitter_analyzer/mcp/tools/base_tool.py +90 -5
- tree_sitter_analyzer/mcp/tools/query_tool.py +73 -6
- tree_sitter_analyzer/mcp/tools/read_partial_tool.py +3 -6
- tree_sitter_analyzer/mcp/tools/table_format_tool.py +37 -11
- tree_sitter_analyzer/mcp/tools/universal_analyze_tool.py +102 -22
- tree_sitter_analyzer/models.py +138 -43
- tree_sitter_analyzer/security/boundary_manager.py +29 -9
- tree_sitter_analyzer/security/validator.py +16 -3
- {tree_sitter_analyzer-1.1.3.dist-info → tree_sitter_analyzer-1.2.0.dist-info}/METADATA +296 -125
- {tree_sitter_analyzer-1.1.3.dist-info → tree_sitter_analyzer-1.2.0.dist-info}/RECORD +23 -22
- {tree_sitter_analyzer-1.1.3.dist-info → tree_sitter_analyzer-1.2.0.dist-info}/WHEEL +0 -0
- {tree_sitter_analyzer-1.1.3.dist-info → tree_sitter_analyzer-1.2.0.dist-info}/entry_points.txt +0 -0
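Most of the changes below revolve around the new tree_sitter_analyzer/constants.py module (+68 lines, not shown in this diff): the ELEMENT_TYPE_* constants and the is_element_of_type() helper it exports replace the ad-hoc element checks the MCP tools used before. Since the module body is not included here, the following is only a rough sketch, under the assumption that the constants are plain strings matched against an element's reported type; the real implementation may differ.

    # Hypothetical sketch of tree_sitter_analyzer/constants.py (file not shown in this diff).
    ELEMENT_TYPE_CLASS = "class"
    ELEMENT_TYPE_FUNCTION = "function"
    ELEMENT_TYPE_VARIABLE = "variable"
    ELEMENT_TYPE_IMPORT = "import"
    ELEMENT_TYPE_PACKAGE = "package"


    def is_element_of_type(element: object, element_type: str) -> bool:
        """Return True if the element reports the given unified element type."""
        reported = getattr(element, "element_type", None)
        if reported is not None:
            return reported == element_type
        # Fallback assumption: compare against the element's class name.
        return element.__class__.__name__.lower() == element_type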
tree_sitter_analyzer/mcp/server.py

@@ -45,13 +45,21 @@ except ImportError:
     pass


+from ..constants import (
+    ELEMENT_TYPE_CLASS,
+    ELEMENT_TYPE_FUNCTION,
+    ELEMENT_TYPE_IMPORT,
+    ELEMENT_TYPE_PACKAGE,
+    ELEMENT_TYPE_VARIABLE,
+    is_element_of_type,
+)
 from ..core.analysis_engine import get_analysis_engine
 from ..project_detector import detect_project_root
 from ..security import SecurityValidator
 from ..utils import setup_logger
 from . import MCP_INFO
 from .resources import CodeFileResource, ProjectStatsResource
-from .tools.
+from .tools.analyze_scale_tool import AnalyzeScaleTool
 from .tools.query_tool import QueryTool
 from .tools.read_partial_tool import ReadPartialTool
 from .tools.table_format_tool import TableFormatTool
@@ -81,12 +89,10 @@ class TreeSitterAnalyzerMCPServer:

         # Initialize MCP tools with security validation (four core tools)
         self.query_tool = QueryTool(project_root)  # query_code
-        self.read_partial_tool
-
-        ) #
-
-        project_root
-        ) # analyze_code_structure
+        self.read_partial_tool = ReadPartialTool(project_root)  # extract_code_section
+        self.table_format_tool = TableFormatTool(project_root)  # analyze_code_structure
+        self.analyze_scale_tool = AnalyzeScaleTool(project_root)  # check_code_scale
+
         # Optional universal tool to satisfy initialization tests
         try:
             from .tools.universal_analyze_tool import UniversalAnalyzeTool
@@ -186,54 +192,190 @@ class TreeSitterAnalyzerMCPServer:
             )
             raise RuntimeError(f"Failed to analyze file: {file_path} - {error_msg}")

-        #
-
+        # Get element counts from the unified elements list
+        elements = analysis_result.elements or []

-        #
-
+        # Count elements by type using the new unified system
+        classes_count = len(
+            [e for e in elements if is_element_of_type(e, ELEMENT_TYPE_CLASS)]
+        )
+        methods_count = len(
+            [e for e in elements if is_element_of_type(e, ELEMENT_TYPE_FUNCTION)]
+        )
+        fields_count = len(
+            [e for e in elements if is_element_of_type(e, ELEMENT_TYPE_VARIABLE)]
+        )
+        imports_count = len(
+            [e for e in elements if is_element_of_type(e, ELEMENT_TYPE_IMPORT)]
+        )
+        packages_count = len(
+            [e for e in elements if is_element_of_type(e, ELEMENT_TYPE_PACKAGE)]
+        )
+        total_elements = (
+            classes_count
+            + methods_count
+            + fields_count
+            + imports_count
+            + packages_count
+        )

-        #
-
-
-
-
+        # Calculate accurate file metrics including comments and blank lines
+        file_metrics = self._calculate_file_metrics(resolved_path, language)
+        lines_code = file_metrics["code_lines"]
+        lines_comment = file_metrics["comment_lines"]
+        lines_blank = file_metrics["blank_lines"]

         result = {
             "file_path": file_path,
             "language": language,
             "metrics": {
-                "lines_total":
-                "lines_code":
-                "lines_comment":
-                "lines_blank":
+                "lines_total": analysis_result.line_count,
+                "lines_code": lines_code,
+                "lines_comment": lines_comment,
+                "lines_blank": lines_blank,
                 "elements": {
                     "classes": classes_count,
                     "methods": methods_count,
                     "fields": fields_count,
                     "imports": imports_count,
-                    "
+                    "packages": packages_count,
+                    "total": total_elements,
                 },
             },
         }

         if include_complexity:
             # Add complexity metrics if available
-            methods = [
+            methods = [
+                e for e in elements if is_element_of_type(e, ELEMENT_TYPE_FUNCTION)
+            ]
             if methods:
-                complexities = [
+                complexities = [getattr(m, "complexity_score", 0) for m in methods]
                 result["metrics"]["complexity"] = {
                     "total": sum(complexities),
-                    "average": (
-                        sum(complexities) / len(complexities) if complexities else 0
+                    "average": round(
+                        sum(complexities) / len(complexities) if complexities else 0, 2
                     ),
                     "max": max(complexities) if complexities else 0,
                 }

         if include_details:
-
+            # Convert elements to serializable format
+            detailed_elements = []
+            for elem in elements:
+                if hasattr(elem, "__dict__"):
+                    detailed_elements.append(elem.__dict__)
+                else:
+                    detailed_elements.append(str(elem))
+            result["detailed_elements"] = detailed_elements

         return result

+    def _calculate_file_metrics(self, file_path: str, language: str) -> dict[str, Any]:
+        """
+        Calculate accurate file metrics including line counts, comments, and blank lines.
+
+        Args:
+            file_path: Path to the file to analyze
+            language: Programming language
+
+        Returns:
+            Dictionary containing file metrics
+        """
+        try:
+            with open(file_path, encoding="utf-8") as f:
+                content = f.read()
+
+            lines = content.split("\n")
+            total_lines = len(lines)
+
+            # Remove empty line at the end if file ends with newline
+            if lines and not lines[-1]:
+                total_lines -= 1
+
+            # Count different types of lines
+            code_lines = 0
+            comment_lines = 0
+            blank_lines = 0
+            in_multiline_comment = False
+
+            for line in lines:
+                stripped = line.strip()
+
+                # Check for blank lines first
+                if not stripped:
+                    blank_lines += 1
+                    continue
+
+                # Check if we're in a multi-line comment
+                if in_multiline_comment:
+                    comment_lines += 1
+                    # Check if this line ends the multi-line comment
+                    if "*/" in stripped:
+                        in_multiline_comment = False
+                    continue
+
+                # Check for multi-line comment start
+                if stripped.startswith("/**") or stripped.startswith("/*"):
+                    comment_lines += 1
+                    # Check if this line also ends the comment
+                    if "*/" not in stripped:
+                        in_multiline_comment = True
+                    continue
+
+                # Check for single-line comments
+                if stripped.startswith("//"):
+                    comment_lines += 1
+                    continue
+
+                # Check for JavaDoc continuation lines (lines starting with * but not */)
+                if stripped.startswith("*") and not stripped.startswith("*/"):
+                    comment_lines += 1
+                    continue
+
+                # Check for other comment types based on language
+                if language == "python" and stripped.startswith("#"):
+                    comment_lines += 1
+                    continue
+                elif language == "sql" and stripped.startswith("--"):
+                    comment_lines += 1
+                    continue
+                elif language in ["html", "xml"] and stripped.startswith("<!--"):
+                    comment_lines += 1
+                    if "-->" not in stripped:
+                        in_multiline_comment = True
+                    continue
+                elif in_multiline_comment and "-->" in stripped:
+                    comment_lines += 1
+                    in_multiline_comment = False
+                    continue
+
+                # If not a comment, it's code
+                code_lines += 1
+
+            # Ensure the sum equals total_lines (handle any rounding errors)
+            calculated_total = code_lines + comment_lines + blank_lines
+            if calculated_total != total_lines:
+                # Adjust code_lines to match total
+                code_lines = total_lines - comment_lines - blank_lines
+                # Ensure code_lines is not negative
+                code_lines = max(0, code_lines)
+
+            return {
+                "total_lines": total_lines,
+                "code_lines": code_lines,
+                "comment_lines": comment_lines,
+                "blank_lines": blank_lines,
+            }
+        except Exception as e:
+            logger.error(f"Error calculating file metrics for {file_path}: {e}")
+            return {
+                "total_lines": 0,
+                "code_lines": 0,
+                "comment_lines": 0,
+                "blank_lines": 0,
+            }
+
     def create_server(self) -> Server:
         """
         Create and configure the MCP server.
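The new _calculate_file_metrics() walks the file once and sorts every line into exactly one of three buckets (code, comment, blank), tracking C-style block comments through an in_multiline_comment flag and adding language-specific single-line markers for Python, SQL, and HTML/XML. The following standalone sketch restates those classification rules on a small hypothetical Java snippet, to show the kind of result the method feeds into the "metrics" block above:

    # Re-statement of the classification rules from _calculate_file_metrics,
    # inlined on a hypothetical Java snippet instead of reading a file from disk.
    sample_lines = [
        "/**",
        " * Adds two numbers.",
        " */",
        "public int add(int a, int b) {",
        "    // simple sum",
        "    return a + b;",
        "}",
    ]

    code = comment = blank = 0
    in_block = False
    for raw in sample_lines:
        line = raw.strip()
        if not line:
            blank += 1
        elif in_block:
            comment += 1
            in_block = "*/" not in line
        elif line.startswith(("/**", "/*")):
            comment += 1
            in_block = "*/" not in line
        elif line.startswith("//") or (line.startswith("*") and not line.startswith("*/")):
            comment += 1
        else:
            code += 1

    print({"code_lines": code, "comment_lines": comment, "blank_lines": blank})
    # -> {'code_lines': 3, 'comment_lines': 4, 'blank_lines': 0}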
@@ -359,14 +501,8 @@ class TreeSitterAnalyzerMCPServer:
                 if "file_path" not in arguments:
                     raise ValueError("file_path parameter is required")

-                #
-
-                    "file_path": arguments["file_path"],
-                    "language": arguments.get("language"),
-                    "include_complexity": arguments.get("include_complexity", True),
-                    "include_details": arguments.get("include_details", False),
-                }
-                result = await self._analyze_code_scale(full_args)
+                # Use the original _analyze_code_scale method for backward compatibility
+                result = await self._analyze_code_scale(arguments)

             elif name == "analyze_code_structure":
                 if "file_path" not in arguments:
@@ -504,12 +640,28 @@ class TreeSitterAnalyzerMCPServer:

     def set_project_path(self, project_path: str) -> None:
         """
-        Set the project path for
+        Set the project path for all components

         Args:
             project_path: Path to the project directory
         """
+        # Update project stats resource
         self.project_stats_resource.set_project_path(project_path)
+
+        # Update all MCP tools (all inherit from BaseMCPTool)
+        self.query_tool.set_project_path(project_path)
+        self.read_partial_tool.set_project_path(project_path)
+        self.table_format_tool.set_project_path(project_path)
+        self.analyze_scale_tool.set_project_path(project_path)
+
+        # Update universal tool if available
+        if hasattr(self, "universal_analyze_tool") and self.universal_analyze_tool:
+            self.universal_analyze_tool.set_project_path(project_path)
+
+        # Update analysis engine and security validator
+        self.analysis_engine = get_analysis_engine(project_path)
+        self.security_validator = SecurityValidator(project_path)
+
         try:
             logger.info(f"Set project path to: {project_path}")
         except (ValueError, OSError):
tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py

@@ -11,17 +11,23 @@ import re
 from pathlib import Path
 from typing import Any

+from ...constants import (
+    ELEMENT_TYPE_CLASS,
+    ELEMENT_TYPE_FUNCTION,
+    ELEMENT_TYPE_IMPORT,
+    ELEMENT_TYPE_VARIABLE,
+    is_element_of_type,
+)
 from ...core.analysis_engine import AnalysisRequest, get_analysis_engine
 from ...language_detector import detect_language_from_file
-from ...security import SecurityValidator
 from ...utils import setup_logger
-from
+from .base_tool import BaseMCPTool

 # Set up logging
 logger = setup_logger(__name__)


-class AnalyzeScaleTool:
+class AnalyzeScaleTool(BaseMCPTool):
     """
     MCP Tool for analyzing code scale and complexity metrics.

@@ -33,12 +39,21 @@ class AnalyzeScaleTool:
     def __init__(self, project_root: str = None) -> None:
         """Initialize the analyze scale tool."""
         # Use unified analysis engine instead of deprecated AdvancedAnalyzer
-
+        super().__init__(project_root)
         self.analysis_engine = get_analysis_engine(project_root)
-        self.security_validator = SecurityValidator(project_root)
-        self.path_resolver = PathResolver(project_root)
         logger.info("AnalyzeScaleTool initialized with security validation")

+    def set_project_path(self, project_path: str) -> None:
+        """
+        Update the project path for all components.
+
+        Args:
+            project_path: New project root directory
+        """
+        super().set_project_path(project_path)
+        self.analysis_engine = get_analysis_engine(project_path)
+        logger.info(f"AnalyzeScaleTool project path updated to: {project_path}")
+
     def _calculate_file_metrics(self, file_path: str) -> dict[str, Any]:
         """
         Calculate basic file metrics including line counts and estimated token count.
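With the inheritance in place, AnalyzeScaleTool only has to refresh its own analysis engine when the project root changes; the SecurityValidator and PathResolver it used to build by hand are now rebuilt by super().set_project_path(). A minimal usage sketch (the paths are hypothetical):

    from tree_sitter_analyzer.mcp.tools.analyze_scale_tool import AnalyzeScaleTool

    tool = AnalyzeScaleTool("/projects/old")   # hypothetical project root
    tool.set_project_path("/projects/new")     # hypothetical new root
    # BaseMCPTool has now rebuilt tool.security_validator and tool.path_resolver,
    # and the override above has swapped in a fresh analysis engine for /projects/new.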
@@ -123,7 +138,9 @@ class AnalyzeScaleTool:

         # Extract class information with position from unified analysis engine
         classes = [
-            e
+            e
+            for e in analysis_result.elements
+            if is_element_of_type(e, ELEMENT_TYPE_CLASS)
         ]
         for cls in classes:
             class_info = {
@@ -141,7 +158,9 @@ class AnalyzeScaleTool:

         # Extract method information with position and complexity from unified analysis engine
         methods = [
-            e
+            e
+            for e in analysis_result.elements
+            if is_element_of_type(e, ELEMENT_TYPE_FUNCTION)
         ]
         for method in methods:
             method_info = {
@@ -174,7 +193,9 @@ class AnalyzeScaleTool:
         # Extract field information with position
         # Extract field information from unified analysis engine
         fields = [
-            e
+            e
+            for e in analysis_result.elements
+            if is_element_of_type(e, ELEMENT_TYPE_VARIABLE)
         ]
         for field in fields:
             field_info = {
@@ -192,7 +213,9 @@ class AnalyzeScaleTool:
         # Extract import information
         # Extract import information from unified analysis engine
         imports = [
-            e
+            e
+            for e in analysis_result.elements
+            if is_element_of_type(e, ELEMENT_TYPE_IMPORT)
         ]
         for imp in imports:
             import_info = {
@@ -459,7 +482,7 @@ class AnalyzeScaleTool:
                     for e in (
                         analysis_result.elements if analysis_result else []
                     )
-                    if e
+                    if is_element_of_type(e, ELEMENT_TYPE_CLASS)
                 ]
             ),
             "methods": len(
@@ -468,7 +491,7 @@ class AnalyzeScaleTool:
                     for e in (
                         analysis_result.elements if analysis_result else []
                     )
-                    if e
+                    if is_element_of_type(e, ELEMENT_TYPE_FUNCTION)
                 ]
             ),
             "fields": len(
@@ -477,7 +500,7 @@ class AnalyzeScaleTool:
                     for e in (
                         analysis_result.elements if analysis_result else []
                     )
-                    if e
+                    if is_element_of_type(e, ELEMENT_TYPE_VARIABLE)
                 ]
             ),
             "imports": len(
@@ -486,7 +509,7 @@ class AnalyzeScaleTool:
                     for e in (
                         analysis_result.elements if analysis_result else []
                     )
-                    if e
+                    if is_element_of_type(e, ELEMENT_TYPE_IMPORT)
                 ]
             ),
             "annotations": len(
@@ -530,7 +553,7 @@ class AnalyzeScaleTool:
                     for e in (
                         analysis_result.elements if analysis_result else []
                     )
-                    if e
+                    if is_element_of_type(e, ELEMENT_TYPE_CLASS)
                 ]
             ],
             "methods": [
@@ -558,7 +581,7 @@ class AnalyzeScaleTool:
                     for e in (
                         analysis_result.elements if analysis_result else []
                     )
-                    if e
+                    if is_element_of_type(e, ELEMENT_TYPE_FUNCTION)
                 ]
             ],
             "fields": [
@@ -580,7 +603,7 @@ class AnalyzeScaleTool:
                     for e in (
                         analysis_result.elements if analysis_result else []
                     )
-                    if e
+                    if is_element_of_type(e, ELEMENT_TYPE_VARIABLE)
                 ]
             ],
         }
@@ -590,14 +613,14 @@ class AnalyzeScaleTool:
             [
                 e
                 for e in (analysis_result.elements if analysis_result else [])
-                if e
+                if is_element_of_type(e, ELEMENT_TYPE_CLASS)
             ]
         )
         methods_count = len(
             [
                 e
                 for e in (analysis_result.elements if analysis_result else [])
-                if e
+                if is_element_of_type(e, ELEMENT_TYPE_FUNCTION)
             ]
         )

tree_sitter_analyzer/mcp/tools/base_tool.py

@@ -2,16 +2,101 @@
 """
 Base Tool Protocol for MCP Tools

-This module defines the
-to ensure
+This module defines the base class that all MCP tools should inherit from
+to ensure consistent behavior and project path management.
 """

-from
+from abc import ABC, abstractmethod
+from typing import Any

+from ...security import SecurityValidator
+from ...utils import setup_logger
+from ..utils.path_resolver import PathResolver

-
+# Set up logging
+logger = setup_logger(__name__)
+
+
+class BaseMCPTool(ABC):
+    """
+    Base class for all MCP tools.
+
+    Provides common functionality including project path management,
+    security validation, and path resolution.
+    """
+
+    def __init__(self, project_root: str | None = None) -> None:
+        """
+        Initialize the base MCP tool.
+
+        Args:
+            project_root: Optional project root directory
+        """
+        self.project_root = project_root
+        self.security_validator = SecurityValidator(project_root)
+        self.path_resolver = PathResolver(project_root)
+        logger.debug(
+            f"{self.__class__.__name__} initialized with project root: {project_root}"
+        )
+
+    def set_project_path(self, project_path: str) -> None:
+        """
+        Update the project path for all components.
+
+        Args:
+            project_path: New project root directory
+        """
+        self.project_root = project_path
+        self.security_validator = SecurityValidator(project_path)
+        self.path_resolver = PathResolver(project_path)
+        logger.info(
+            f"{self.__class__.__name__} project path updated to: {project_path}"
+        )
+
+    @abstractmethod
+    def get_tool_definition(self) -> Any:
+        """
+        Get the MCP tool definition.
+
+        Returns:
+            Tool definition object compatible with MCP server
+        """
+        pass
+
+    @abstractmethod
+    async def execute(self, arguments: dict[str, Any]) -> dict[str, Any]:
+        """
+        Execute the tool with the given arguments.
+
+        Args:
+            arguments: Tool arguments
+
+        Returns:
+            Dictionary containing execution results
+        """
+        pass
+
+    @abstractmethod
+    def validate_arguments(self, arguments: dict[str, Any]) -> bool:
+        """
+        Validate tool arguments.
+
+        Args:
+            arguments: Arguments to validate
+
+        Returns:
+            True if arguments are valid
+
+        Raises:
+            ValueError: If arguments are invalid
+        """
+        pass
+
+
+# Keep the protocol for backward compatibility
+class MCPTool(BaseMCPTool):
     """
-    Protocol for MCP tools.
+    Protocol for MCP tools (deprecated, use BaseMCPTool instead).

     All MCP tools must implement this protocol to ensure they have
     the required methods for integration with the MCP server.
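Because BaseMCPTool is an ABC, a new tool only has to provide get_tool_definition, execute, and validate_arguments; the project-root, security-validator, and path-resolver plumbing is inherited. The following subclass is a made-up illustration of that contract, not a tool that ships with the package:

    from typing import Any

    from tree_sitter_analyzer.mcp.tools.base_tool import BaseMCPTool


    class EchoTool(BaseMCPTool):
        """Hypothetical example tool that echoes its arguments back."""

        def get_tool_definition(self) -> dict[str, Any]:
            return {
                "name": "echo",
                "description": "Return the received arguments unchanged.",
                "inputSchema": {"type": "object", "properties": {}},
            }

        async def execute(self, arguments: dict[str, Any]) -> dict[str, Any]:
            self.validate_arguments(arguments)
            return {"echo": arguments, "project_root": self.project_root}

        def validate_arguments(self, arguments: dict[str, Any]) -> bool:
            if not isinstance(arguments, dict):
                raise ValueError("arguments must be a dictionary")
            return True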
tree_sitter_analyzer/mcp/tools/query_tool.py

@@ -11,22 +11,30 @@ from typing import Any

 from ...core.query_service import QueryService
 from ...language_detector import detect_language_from_file
-from ...security import SecurityValidator
 from ..utils.error_handler import handle_mcp_errors
-from
+from .base_tool import BaseMCPTool

 logger = logging.getLogger(__name__)


-class QueryTool:
+class QueryTool(BaseMCPTool):
     """MCP query tool providing tree-sitter query functionality"""

     def __init__(self, project_root: str | None = None) -> None:
         """Initialize query tool"""
-
+        super().__init__(project_root)
         self.query_service = QueryService(project_root)
-
-
+
+    def set_project_path(self, project_path: str) -> None:
+        """
+        Update the project path for all components.
+
+        Args:
+            project_path: New project root directory
+        """
+        super().set_project_path(project_path)
+        self.query_service = QueryService(project_path)
+        logger.info(f"QueryTool project path updated to: {project_path}")

     def get_tool_definition(self) -> dict[str, Any]:
         """
@@ -248,3 +256,62 @@ class QueryTool:
             List of available query keys
         """
         return self.query_service.get_available_queries(language)
+
+    def validate_arguments(self, arguments: dict[str, Any]) -> bool:
+        """
+        Validate tool arguments.
+
+        Args:
+            arguments: Arguments to validate
+
+        Returns:
+            True if arguments are valid
+
+        Raises:
+            ValueError: If arguments are invalid
+        """
+        # Check required fields
+        if "file_path" not in arguments:
+            raise ValueError("file_path is required")
+
+        # Validate file_path
+        file_path = arguments["file_path"]
+        if not isinstance(file_path, str):
+            raise ValueError("file_path must be a string")
+        if not file_path.strip():
+            raise ValueError("file_path cannot be empty")
+
+        # Check that either query_key or query_string is provided
+        query_key = arguments.get("query_key")
+        query_string = arguments.get("query_string")
+
+        if not query_key and not query_string:
+            raise ValueError("Either query_key or query_string must be provided")
+
+        # Validate query_key if provided
+        if query_key and not isinstance(query_key, str):
+            raise ValueError("query_key must be a string")
+
+        # Validate query_string if provided
+        if query_string and not isinstance(query_string, str):
+            raise ValueError("query_string must be a string")
+
+        # Validate optional fields
+        if "language" in arguments:
+            language = arguments["language"]
+            if not isinstance(language, str):
+                raise ValueError("language must be a string")
+
+        if "filter" in arguments:
+            filter_expr = arguments["filter"]
+            if not isinstance(filter_expr, str):
+                raise ValueError("filter must be a string")
+
+        if "output_format" in arguments:
+            output_format = arguments["output_format"]
+            if not isinstance(output_format, str):
+                raise ValueError("output_format must be a string")
+            if output_format not in ["json", "summary"]:
+                raise ValueError("output_format must be one of: json, summary")
+
+        return True