tree-sitter-analyzer 0.1.0 (py3-none-any.whl)

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.

Potentially problematic release.


This version of tree-sitter-analyzer might be problematic.

Files changed (78)
  1. tree_sitter_analyzer/__init__.py +121 -0
  2. tree_sitter_analyzer/__main__.py +12 -0
  3. tree_sitter_analyzer/api.py +539 -0
  4. tree_sitter_analyzer/cli/__init__.py +39 -0
  5. tree_sitter_analyzer/cli/__main__.py +13 -0
  6. tree_sitter_analyzer/cli/commands/__init__.py +27 -0
  7. tree_sitter_analyzer/cli/commands/advanced_command.py +88 -0
  8. tree_sitter_analyzer/cli/commands/base_command.py +155 -0
  9. tree_sitter_analyzer/cli/commands/default_command.py +19 -0
  10. tree_sitter_analyzer/cli/commands/partial_read_command.py +133 -0
  11. tree_sitter_analyzer/cli/commands/query_command.py +82 -0
  12. tree_sitter_analyzer/cli/commands/structure_command.py +121 -0
  13. tree_sitter_analyzer/cli/commands/summary_command.py +93 -0
  14. tree_sitter_analyzer/cli/commands/table_command.py +233 -0
  15. tree_sitter_analyzer/cli/info_commands.py +121 -0
  16. tree_sitter_analyzer/cli_main.py +276 -0
  17. tree_sitter_analyzer/core/__init__.py +20 -0
  18. tree_sitter_analyzer/core/analysis_engine.py +574 -0
  19. tree_sitter_analyzer/core/cache_service.py +330 -0
  20. tree_sitter_analyzer/core/engine.py +560 -0
  21. tree_sitter_analyzer/core/parser.py +288 -0
  22. tree_sitter_analyzer/core/query.py +502 -0
  23. tree_sitter_analyzer/encoding_utils.py +460 -0
  24. tree_sitter_analyzer/exceptions.py +340 -0
  25. tree_sitter_analyzer/file_handler.py +222 -0
  26. tree_sitter_analyzer/formatters/__init__.py +1 -0
  27. tree_sitter_analyzer/formatters/base_formatter.py +168 -0
  28. tree_sitter_analyzer/formatters/formatter_factory.py +74 -0
  29. tree_sitter_analyzer/formatters/java_formatter.py +270 -0
  30. tree_sitter_analyzer/formatters/python_formatter.py +235 -0
  31. tree_sitter_analyzer/interfaces/__init__.py +10 -0
  32. tree_sitter_analyzer/interfaces/cli.py +557 -0
  33. tree_sitter_analyzer/interfaces/cli_adapter.py +319 -0
  34. tree_sitter_analyzer/interfaces/mcp_adapter.py +170 -0
  35. tree_sitter_analyzer/interfaces/mcp_server.py +416 -0
  36. tree_sitter_analyzer/java_analyzer.py +219 -0
  37. tree_sitter_analyzer/language_detector.py +400 -0
  38. tree_sitter_analyzer/language_loader.py +228 -0
  39. tree_sitter_analyzer/languages/__init__.py +11 -0
  40. tree_sitter_analyzer/languages/java_plugin.py +1113 -0
  41. tree_sitter_analyzer/languages/python_plugin.py +712 -0
  42. tree_sitter_analyzer/mcp/__init__.py +32 -0
  43. tree_sitter_analyzer/mcp/resources/__init__.py +47 -0
  44. tree_sitter_analyzer/mcp/resources/code_file_resource.py +213 -0
  45. tree_sitter_analyzer/mcp/resources/project_stats_resource.py +550 -0
  46. tree_sitter_analyzer/mcp/server.py +319 -0
  47. tree_sitter_analyzer/mcp/tools/__init__.py +36 -0
  48. tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py +558 -0
  49. tree_sitter_analyzer/mcp/tools/analyze_scale_tool_cli_compatible.py +245 -0
  50. tree_sitter_analyzer/mcp/tools/base_tool.py +55 -0
  51. tree_sitter_analyzer/mcp/tools/get_positions_tool.py +448 -0
  52. tree_sitter_analyzer/mcp/tools/read_partial_tool.py +302 -0
  53. tree_sitter_analyzer/mcp/tools/table_format_tool.py +359 -0
  54. tree_sitter_analyzer/mcp/tools/universal_analyze_tool.py +476 -0
  55. tree_sitter_analyzer/mcp/utils/__init__.py +106 -0
  56. tree_sitter_analyzer/mcp/utils/error_handler.py +549 -0
  57. tree_sitter_analyzer/models.py +481 -0
  58. tree_sitter_analyzer/output_manager.py +264 -0
  59. tree_sitter_analyzer/plugins/__init__.py +334 -0
  60. tree_sitter_analyzer/plugins/base.py +446 -0
  61. tree_sitter_analyzer/plugins/java_plugin.py +625 -0
  62. tree_sitter_analyzer/plugins/javascript_plugin.py +439 -0
  63. tree_sitter_analyzer/plugins/manager.py +355 -0
  64. tree_sitter_analyzer/plugins/plugin_loader.py +83 -0
  65. tree_sitter_analyzer/plugins/python_plugin.py +598 -0
  66. tree_sitter_analyzer/plugins/registry.py +366 -0
  67. tree_sitter_analyzer/queries/__init__.py +27 -0
  68. tree_sitter_analyzer/queries/java.py +394 -0
  69. tree_sitter_analyzer/queries/javascript.py +149 -0
  70. tree_sitter_analyzer/queries/python.py +286 -0
  71. tree_sitter_analyzer/queries/typescript.py +230 -0
  72. tree_sitter_analyzer/query_loader.py +260 -0
  73. tree_sitter_analyzer/table_formatter.py +448 -0
  74. tree_sitter_analyzer/utils.py +201 -0
  75. tree_sitter_analyzer-0.1.0.dist-info/METADATA +581 -0
  76. tree_sitter_analyzer-0.1.0.dist-info/RECORD +78 -0
  77. tree_sitter_analyzer-0.1.0.dist-info/WHEEL +4 -0
  78. tree_sitter_analyzer-0.1.0.dist-info/entry_points.txt +8 -0
tree_sitter_analyzer/mcp/tools/read_partial_tool.py
@@ -0,0 +1,302 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Read Code Partial MCP Tool
+
+ This tool provides partial file reading functionality through the MCP protocol,
+ allowing selective content extraction with line and column range support.
+ """
+
+ import json
+ import logging
+ from pathlib import Path
+ from typing import Any, Dict, Optional
+
+ from ...file_handler import read_file_partial
+ from ...utils import log_performance, setup_logger
+
+ # Set up logging
+ logger = setup_logger(__name__)
+
+
+ class ReadPartialTool:
+     """
+     MCP Tool for reading partial content from code files.
+
+     This tool integrates with existing file_handler functionality to provide
+     selective file content reading through the MCP protocol.
+     """
+
+     def __init__(self) -> None:
+         """Initialize the read partial tool."""
+         logger.info("ReadPartialTool initialized")
+
+     def get_tool_schema(self) -> Dict[str, Any]:
+         """
+         Get the MCP tool schema for read_code_partial.
+
+         Returns:
+             Dictionary containing the tool schema
+         """
+         return {
+             "type": "object",
+             "properties": {
+                 "file_path": {
+                     "type": "string",
+                     "description": "Path to the code file to read",
+                 },
+                 "start_line": {
+                     "type": "integer",
+                     "description": "Starting line number (1-based)",
+                     "minimum": 1,
+                 },
+                 "end_line": {
+                     "type": "integer",
+                     "description": "Ending line number (1-based, optional - reads to end if not specified)",
+                     "minimum": 1,
+                 },
+                 "start_column": {
+                     "type": "integer",
+                     "description": "Starting column number (0-based, optional)",
+                     "minimum": 0,
+                 },
+                 "end_column": {
+                     "type": "integer",
+                     "description": "Ending column number (0-based, optional)",
+                     "minimum": 0,
+                 },
+                 "format": {
+                     "type": "string",
+                     "description": "Output format for the content",
+                     "enum": ["text", "json"],
+                     "default": "text",
+                 },
+             },
+             "required": ["file_path", "start_line"],
+             "additionalProperties": False,
+         }
+
+     async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         Execute the read_code_partial tool.
+
+         Args:
+             arguments: Tool arguments containing file_path, line/column ranges, and format
+
+         Returns:
+             Dictionary containing the partial file content and metadata (CLI --partial-read compatible format)
+
+         Raises:
+             ValueError: If required arguments are missing or invalid
+             FileNotFoundError: If the specified file doesn't exist
+         """
+         # Validate required arguments
+         if "file_path" not in arguments:
+             raise ValueError("file_path is required")
+
+         if "start_line" not in arguments:
+             raise ValueError("start_line is required")
+
+         file_path = arguments["file_path"]
+         start_line = arguments["start_line"]
+         end_line = arguments.get("end_line")
+         start_column = arguments.get("start_column")
+         end_column = arguments.get("end_column")
+         output_format = arguments.get("format", "text")
+
+         # Validate file exists
+         if not Path(file_path).exists():
+             raise FileNotFoundError(f"File not found: {file_path}")
+
+         # Validate line numbers
+         if start_line < 1:
+             raise ValueError("start_line must be >= 1")
+
+         if end_line is not None and end_line < start_line:
+             raise ValueError("end_line must be >= start_line")
+
+         # Validate column numbers
+         if start_column is not None and start_column < 0:
+             raise ValueError("start_column must be >= 0")
+
+         if end_column is not None and end_column < 0:
+             raise ValueError("end_column must be >= 0")
+
+         logger.info(
+             f"Reading partial content from {file_path}: lines {start_line}-{end_line or 'end'}"
+         )
+
+         try:
+             # Use existing file_handler functionality
+             # Use performance monitoring with proper context manager
+             from ...mcp.utils import get_performance_monitor
+
+             with get_performance_monitor().measure_operation("read_code_partial"):
+                 content = self._read_file_partial(
+                     file_path, start_line, end_line, start_column, end_column
+                 )
+
+             if content is None:
+                 raise RuntimeError(
+                     f"Failed to read partial content from file: {file_path}"
+                 )
+
+             # Build result structure compatible with CLI --partial-read format
+             result_data = {
+                 "file_path": file_path,
+                 "range": {
+                     "start_line": start_line,
+                     "end_line": end_line,
+                     "start_column": start_column,
+                     "end_column": end_column,
+                 },
+                 "content": content,
+                 "content_length": len(content),
+             }
+
+             # Format as JSON string like CLI does
+             json_output = json.dumps(result_data, indent=2, ensure_ascii=False)
+
+             # Build range info for header
+             range_info = f"行 {start_line}"
+             if end_line:
+                 range_info += f"-{end_line}"
+
+             # Build CLI-compatible output with header and JSON (without log message)
+             cli_output = (
+                 f"--- 部分読み込み結果 ---\n"
+                 f"ファイル: {file_path}\n"
+                 f"範囲: {range_info}\n"
+                 f"読み込み文字数: {len(content)}\n"
+                 f"{json_output}"
+             )
+
+             logger.info(
+                 f"Successfully read {len(content)} characters from {file_path}"
+             )
+
+             return {"partial_content_result": cli_output}
+
+         except Exception as e:
+             logger.error(f"Error reading partial content from {file_path}: {e}")
+             raise
+
+     def _read_file_partial(
+         self,
+         file_path: str,
+         start_line: int,
+         end_line: Optional[int] = None,
+         start_column: Optional[int] = None,
+         end_column: Optional[int] = None,
+     ) -> Optional[str]:
+         """
+         Internal method to read partial file content.
+
+         This method wraps the existing read_file_partial function from file_handler.
+
+         Args:
+             file_path: Path to the file to read
+             start_line: Starting line number (1-based)
+             end_line: Ending line number (1-based, optional)
+             start_column: Starting column number (0-based, optional)
+             end_column: Ending column number (0-based, optional)
+
+         Returns:
+             Partial file content as string, or None if error
+         """
+         return read_file_partial(
+             file_path, start_line, end_line, start_column, end_column
+         )
+
+     def validate_arguments(self, arguments: Dict[str, Any]) -> bool:
+         """
+         Validate tool arguments against the schema.
+
+         Args:
+             arguments: Arguments to validate
+
+         Returns:
+             True if arguments are valid
+
+         Raises:
+             ValueError: If arguments are invalid
+         """
+         schema = self.get_tool_schema()
+         required_fields = schema.get("required", [])
+
+         # Check required fields
+         for field in required_fields:
+             if field not in arguments:
+                 raise ValueError(f"Required field '{field}' is missing")
+
+         # Validate file_path
+         if "file_path" in arguments:
+             file_path = arguments["file_path"]
+             if not isinstance(file_path, str):
+                 raise ValueError("file_path must be a string")
+             if not file_path.strip():
+                 raise ValueError("file_path cannot be empty")
+
+         # Validate start_line
+         if "start_line" in arguments:
+             start_line = arguments["start_line"]
+             if not isinstance(start_line, int):
+                 raise ValueError("start_line must be an integer")
+             if start_line < 1:
+                 raise ValueError("start_line must be >= 1")
+
+         # Validate end_line
+         if "end_line" in arguments:
+             end_line = arguments["end_line"]
+             if not isinstance(end_line, int):
+                 raise ValueError("end_line must be an integer")
+             if end_line < 1:
+                 raise ValueError("end_line must be >= 1")
+             if "start_line" in arguments and end_line < arguments["start_line"]:
+                 raise ValueError("end_line must be >= start_line")
+
+         # Validate column numbers
+         for col_field in ["start_column", "end_column"]:
+             if col_field in arguments:
+                 col_value = arguments[col_field]
+                 if not isinstance(col_value, int):
+                     raise ValueError(f"{col_field} must be an integer")
+                 if col_value < 0:
+                     raise ValueError(f"{col_field} must be >= 0")
+
+         # Validate format
+         if "format" in arguments:
+             format_value = arguments["format"]
+             if not isinstance(format_value, str):
+                 raise ValueError("format must be a string")
+             if format_value not in ["text", "json"]:
+                 raise ValueError("format must be 'text' or 'json'")
+
+         return True
+
+     def get_tool_definition(self) -> Any:
+         """
+         Get the MCP tool definition for read_code_partial.
+
+         Returns:
+             Tool definition object compatible with MCP server
+         """
+         try:
+             from mcp.types import Tool
+
+             return Tool(
+                 name="read_code_partial",
+                 description="Read partial content from code files with line and column range support (equivalent to CLI --partial-read option)",
+                 inputSchema=self.get_tool_schema(),
+             )
+         except ImportError:
+             # Fallback for when MCP is not available
+             return {
+                 "name": "read_code_partial",
+                 "description": "Read partial content from code files with line and column range support (equivalent to CLI --partial-read option)",
+                 "inputSchema": self.get_tool_schema(),
+             }
+
+
+ # Tool instance for easy access
+ read_partial_tool = ReadPartialTool()
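
For orientation, the following sketch shows how this tool can be driven once the wheel is installed. It is not part of the package diff: example.py and the line range are placeholder values, and the import path simply follows the file listing above.

import asyncio

from tree_sitter_analyzer.mcp.tools.read_partial_tool import read_partial_tool

async def main() -> None:
    # Required keys per the schema above: file_path and start_line; the rest are optional.
    args = {"file_path": "example.py", "start_line": 10, "end_line": 20}
    read_partial_tool.validate_arguments(args)      # raises ValueError on malformed input
    result = await read_partial_tool.execute(args)  # raises FileNotFoundError if the file is missing
    print(result["partial_content_result"])         # CLI-style header followed by the JSON payload

asyncio.run(main())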
tree_sitter_analyzer/mcp/tools/table_format_tool.py
@@ -0,0 +1,359 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Table Format MCP Tool
+
+ This tool provides table-formatted output for code analysis results through the MCP protocol,
+ equivalent to the CLI --table=full option functionality.
+ """
+
+ import logging
+ import os
+ from pathlib import Path
+ from typing import Any, Dict, Optional
+
+ from ...core.analysis_engine import get_analysis_engine, AnalysisRequest
+ from ...language_detector import detect_language_from_file
+ from ...table_formatter import TableFormatter
+ from ...utils import setup_logger
+ from ..utils import get_performance_monitor
+
+ # Set up logging
+ logger = setup_logger(__name__)
+
+
+ class TableFormatTool:
+     """
+     MCP Tool for formatting code analysis results as tables.
+
+     This tool integrates with existing table_formatter and analyzer components
+     to provide table-formatted output through the MCP protocol, equivalent to
+     the CLI --table=full option.
+     """
+
+     def __init__(self) -> None:
+         """Initialize the table format tool."""
+         self.logger = logger
+         self.analysis_engine = get_analysis_engine()
+         logger.info("TableFormatTool initialized")
+
+     def get_tool_schema(self) -> Dict[str, Any]:
+         """
+         Get the MCP tool schema for format_table.
+
+         Returns:
+             Dictionary containing the tool schema
+         """
+         return {
+             "type": "object",
+             "properties": {
+                 "file_path": {
+                     "type": "string",
+                     "description": "Path to the code file to analyze and format",
+                 },
+                 "format_type": {
+                     "type": "string",
+                     "description": "Table format type",
+                     "enum": ["full", "compact", "csv"],
+                     "default": "full",
+                 },
+                 "language": {
+                     "type": "string",
+                     "description": "Programming language (optional, auto-detected if not specified)",
+                 },
+             },
+             "required": ["file_path"],
+             "additionalProperties": False,
+         }
+
+     def validate_arguments(self, arguments: Dict[str, Any]) -> bool:
+         """
+         Validate tool arguments.
+
+         Args:
+             arguments: Dictionary of arguments to validate
+
+         Returns:
+             True if arguments are valid
+
+         Raises:
+             ValueError: If arguments are invalid
+         """
+         # Check required fields
+         if "file_path" not in arguments:
+             raise ValueError("Required field 'file_path' is missing")
+
+         # Validate file_path
+         file_path = arguments["file_path"]
+         if not isinstance(file_path, str):
+             raise ValueError("file_path must be a string")
+         if not file_path.strip():
+             raise ValueError("file_path cannot be empty")
+
+         # Validate format_type if provided
+         if "format_type" in arguments:
+             format_type = arguments["format_type"]
+             if not isinstance(format_type, str):
+                 raise ValueError("format_type must be a string")
+             if format_type not in ["full", "compact", "csv"]:
+                 raise ValueError("format_type must be one of: full, compact, csv")
+
+         # Validate language if provided
+         if "language" in arguments:
+             language = arguments["language"]
+             if not isinstance(language, str):
+                 raise ValueError("language must be a string")
+
+         return True
+
+     def _convert_parameters(self, parameters):
+         """Convert parameters to expected format"""
+         result = []
+         for param in parameters:
+             if isinstance(param, dict):
+                 result.append({
+                     "name": param.get('name', 'param'),
+                     "type": param.get('type', 'Object')
+                 })
+             else:
+                 result.append({
+                     "name": getattr(param, 'name', 'param'),
+                     "type": getattr(param, 'param_type', 'Object')
+                 })
+         return result
+
+     def _get_method_modifiers(self, method) -> list:
+         """Extract method modifiers as a list"""
+         modifiers = []
+         if getattr(method, 'is_static', False):
+             modifiers.append('static')
+         if getattr(method, 'is_final', False):
+             modifiers.append('final')
+         if getattr(method, 'is_abstract', False):
+             modifiers.append('abstract')
+         return modifiers
+
+     def _get_method_parameters(self, method):
+         """Get method parameters in the correct format for TableFormatter"""
+         parameters = getattr(method, 'parameters', [])
+
+         # If parameters is already a list of strings (like "int value"), convert to dict format
+         if parameters and isinstance(parameters[0], str):
+             result = []
+             for param_str in parameters:
+                 parts = param_str.strip().split()
+                 if len(parts) >= 2:
+                     param_type = ' '.join(parts[:-1])  # Everything except last part is type
+                     param_name = parts[-1]  # Last part is name
+                     result.append({
+                         "name": param_name,
+                         "type": param_type
+                     })
+                 elif len(parts) == 1:
+                     # Only type, no name
+                     result.append({
+                         "name": "param",
+                         "type": parts[0]
+                     })
+             return result
+
+         # Fallback to original conversion method
+         return self._convert_parameters(parameters)
+
+     def _get_field_modifiers(self, field) -> list:
+         """Extract field modifiers as a list"""
+         modifiers = []
+
+         # Add visibility to modifiers for CLI compatibility
+         visibility = getattr(field, 'visibility', 'private')
+         if visibility and visibility != 'package':
+             modifiers.append(visibility)
+
+         if getattr(field, 'is_static', False):
+             modifiers.append('static')
+         if getattr(field, 'is_final', False):
+             modifiers.append('final')
+         return modifiers
+
+     def _convert_analysis_result_to_dict(self, result) -> Dict[str, Any]:
+         """Convert AnalysisResult to dictionary format expected by TableFormatter"""
+         # Extract elements by type
+         classes = [e for e in result.elements if e.__class__.__name__ == 'Class']
+         methods = [e for e in result.elements if e.__class__.__name__ == 'Function']
+         fields = [e for e in result.elements if e.__class__.__name__ == 'Variable']
+         imports = [e for e in result.elements if e.__class__.__name__ == 'Import']
+         packages = [e for e in result.elements if e.__class__.__name__ == 'Package']
+
+         # Convert package to expected format
+         package_info = None
+         if packages:
+             package_info = {"name": packages[0].name}
+
+         return {
+             "file_path": result.file_path,
+             "language": result.language,
+             "package": package_info,
+             "classes": [
+                 {
+                     "name": getattr(cls, 'name', 'unknown'),
+                     "line_range": {
+                         "start": getattr(cls, 'start_line', 0),
+                         "end": getattr(cls, 'end_line', 0)
+                     },
+                     "type": getattr(cls, 'class_type', 'class'),
+                     "visibility": "public",  # Force all classes to public for CLI compatibility
+                     "extends": getattr(cls, 'extends_class', None),
+                     "implements": getattr(cls, 'implements_interfaces', []),
+                     "annotations": []
+                 } for cls in classes
+             ],
+             "methods": [
+                 {
+                     "name": getattr(method, 'name', 'unknown'),
+                     "line_range": {
+                         "start": getattr(method, 'start_line', 0),
+                         "end": getattr(method, 'end_line', 0)
+                     },
+                     "return_type": getattr(method, 'return_type', 'void'),
+                     "parameters": self._get_method_parameters(method),
+                     "visibility": getattr(method, 'visibility', 'public'),
+                     "is_static": getattr(method, 'is_static', False),
+                     "is_constructor": getattr(method, 'is_constructor', False),
+                     "complexity_score": getattr(method, 'complexity_score', 0),
+                     "modifiers": self._get_method_modifiers(method),
+                     "annotations": []
+                 } for method in methods
+             ],
+             "fields": [
+                 {
+                     "name": getattr(field, 'name', 'unknown'),
+                     "type": getattr(field, 'field_type', 'Object'),
+                     "line_range": {
+                         "start": getattr(field, 'start_line', 0),
+                         "end": getattr(field, 'end_line', 0)
+                     },
+                     "visibility": getattr(field, 'visibility', 'private'),
+                     "modifiers": self._get_field_modifiers(field),
+                     "annotations": []
+                 } for field in fields
+             ],
+             "imports": [
+                 {
+                     "name": getattr(imp, 'name', 'unknown'),
+                     "statement": getattr(imp, 'name', ''),  # Use name for CLI compatibility
+                     "is_static": getattr(imp, 'is_static', False),
+                     "is_wildcard": getattr(imp, 'is_wildcard', False)
+                 } for imp in imports
+             ],
+             "statistics": {
+                 "class_count": len(classes),
+                 "method_count": len(methods),
+                 "field_count": len(fields),
+                 "import_count": len(imports),
+                 "total_lines": result.line_count
+             }
+         }
+
+     async def execute(self, args: Dict[str, Any]) -> Dict[str, Any]:
+         """Execute table formatting tool."""
+         try:
+             # Validate arguments first
+             if "file_path" not in args:
+                 raise ValueError("file_path is required")
+
+             file_path = args["file_path"]
+             format_type = args.get("format_type", "full")
+             language = args.get("language")
+
+             # Validate file exists
+             if not Path(file_path).exists():
+                 raise FileNotFoundError(f"File not found: {file_path}")
+
+             # Detect language if not provided
+             if not language:
+                 language = detect_language_from_file(file_path)
+
+             # Use performance monitoring
+             monitor = get_performance_monitor()
+             with monitor.measure_operation("table_format_analysis"):
+                 # Analyze structure using the unified analysis engine
+                 request = AnalysisRequest(
+                     file_path=file_path,
+                     language=language,
+                     include_complexity=True,
+                     include_details=True
+                 )
+                 structure_result = await self.analysis_engine.analyze(request)
+
+             if structure_result is None:
+                 raise RuntimeError(
+                     f"Failed to analyze structure for file: {file_path}"
+                 )
+
+             # Create table formatter
+             formatter = TableFormatter(format_type)
+
+             # Convert AnalysisResult to dict format for TableFormatter
+             structure_dict = self._convert_analysis_result_to_dict(structure_result)
+
+             # Format table
+             table_output = formatter.format_structure(structure_dict)
+
+             # Ensure output format matches CLI exactly
+             # Fix line ending differences: normalize to Unix-style LF (\n)
+             table_output = table_output.replace("\r\n", "\n").replace("\r", "\n")
+
+             # CLI uses sys.stdout.buffer.write() which doesn't add trailing newline
+             # Ensure MCP output matches this behavior exactly
+             # Remove any trailing whitespace and newlines to match CLI output
+             table_output = table_output.rstrip()
+
+             # Extract metadata from structure dict
+             metadata = {}
+             if "statistics" in structure_dict:
+                 stats = structure_dict["statistics"]
+                 metadata = {
+                     "classes_count": stats.get("class_count", 0),
+                     "methods_count": stats.get("method_count", 0),
+                     "fields_count": stats.get("field_count", 0),
+                     "total_lines": stats.get("total_lines", 0),
+                 }
+
+             return {
+                 "table_output": table_output,
+                 "format_type": format_type,
+                 "file_path": file_path,
+                 "language": language,
+                 "metadata": metadata,
+             }
+
+         except Exception as e:
+             self.logger.error(f"Error in table format tool: {e}")
+             raise
+
+     def get_tool_definition(self) -> Any:
+         """
+         Get the MCP tool definition for format_table.
+
+         Returns:
+             Tool definition object compatible with MCP server
+         """
+         try:
+             from mcp.types import Tool
+
+             return Tool(
+                 name="format_table",
+                 description="Format code analysis results as tables (equivalent to CLI --table=full option)",
+                 inputSchema=self.get_tool_schema(),
+             )
+         except ImportError:
+             # Fallback for when MCP is not available
+             return {
+                 "name": "format_table",
+                 "description": "Format code analysis results as tables (equivalent to CLI --table=full option)",
+                 "inputSchema": self.get_tool_schema(),
+             }
+
+
+ # Tool instance for easy access
+ table_format_tool = TableFormatTool()
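
In the same spirit, a minimal sketch of exercising TableFormatTool follows. It is illustrative only and not part of the package: Example.java is a placeholder input, and the import path again follows the file listing above.

import asyncio

from tree_sitter_analyzer.mcp.tools.table_format_tool import table_format_tool

async def main() -> None:
    # Only file_path is required; format_type defaults to "full" and the
    # language is auto-detected when omitted.
    args = {"file_path": "Example.java", "format_type": "full"}
    table_format_tool.validate_arguments(args)
    result = await table_format_tool.execute(args)
    print(result["table_output"])  # table text, trailing whitespace stripped
    print(result["metadata"])      # classes_count, methods_count, fields_count, total_lines

asyncio.run(main())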