tree-sitter-analyzer 1.9.17.1 (tree_sitter_analyzer-1.9.17.1-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. tree_sitter_analyzer/__init__.py +132 -0
  2. tree_sitter_analyzer/__main__.py +11 -0
  3. tree_sitter_analyzer/api.py +853 -0
  4. tree_sitter_analyzer/cli/__init__.py +39 -0
  5. tree_sitter_analyzer/cli/__main__.py +12 -0
  6. tree_sitter_analyzer/cli/argument_validator.py +89 -0
  7. tree_sitter_analyzer/cli/commands/__init__.py +26 -0
  8. tree_sitter_analyzer/cli/commands/advanced_command.py +226 -0
  9. tree_sitter_analyzer/cli/commands/base_command.py +181 -0
  10. tree_sitter_analyzer/cli/commands/default_command.py +18 -0
  11. tree_sitter_analyzer/cli/commands/find_and_grep_cli.py +188 -0
  12. tree_sitter_analyzer/cli/commands/list_files_cli.py +133 -0
  13. tree_sitter_analyzer/cli/commands/partial_read_command.py +139 -0
  14. tree_sitter_analyzer/cli/commands/query_command.py +109 -0
  15. tree_sitter_analyzer/cli/commands/search_content_cli.py +161 -0
  16. tree_sitter_analyzer/cli/commands/structure_command.py +156 -0
  17. tree_sitter_analyzer/cli/commands/summary_command.py +116 -0
  18. tree_sitter_analyzer/cli/commands/table_command.py +414 -0
  19. tree_sitter_analyzer/cli/info_commands.py +124 -0
  20. tree_sitter_analyzer/cli_main.py +472 -0
  21. tree_sitter_analyzer/constants.py +85 -0
  22. tree_sitter_analyzer/core/__init__.py +15 -0
  23. tree_sitter_analyzer/core/analysis_engine.py +580 -0
  24. tree_sitter_analyzer/core/cache_service.py +333 -0
  25. tree_sitter_analyzer/core/engine.py +585 -0
  26. tree_sitter_analyzer/core/parser.py +293 -0
  27. tree_sitter_analyzer/core/query.py +605 -0
  28. tree_sitter_analyzer/core/query_filter.py +200 -0
  29. tree_sitter_analyzer/core/query_service.py +340 -0
  30. tree_sitter_analyzer/encoding_utils.py +530 -0
  31. tree_sitter_analyzer/exceptions.py +747 -0
  32. tree_sitter_analyzer/file_handler.py +246 -0
  33. tree_sitter_analyzer/formatters/__init__.py +1 -0
  34. tree_sitter_analyzer/formatters/base_formatter.py +201 -0
  35. tree_sitter_analyzer/formatters/csharp_formatter.py +367 -0
  36. tree_sitter_analyzer/formatters/formatter_config.py +197 -0
  37. tree_sitter_analyzer/formatters/formatter_factory.py +84 -0
  38. tree_sitter_analyzer/formatters/formatter_registry.py +377 -0
  39. tree_sitter_analyzer/formatters/formatter_selector.py +96 -0
  40. tree_sitter_analyzer/formatters/go_formatter.py +368 -0
  41. tree_sitter_analyzer/formatters/html_formatter.py +498 -0
  42. tree_sitter_analyzer/formatters/java_formatter.py +423 -0
  43. tree_sitter_analyzer/formatters/javascript_formatter.py +611 -0
  44. tree_sitter_analyzer/formatters/kotlin_formatter.py +268 -0
  45. tree_sitter_analyzer/formatters/language_formatter_factory.py +123 -0
  46. tree_sitter_analyzer/formatters/legacy_formatter_adapters.py +228 -0
  47. tree_sitter_analyzer/formatters/markdown_formatter.py +725 -0
  48. tree_sitter_analyzer/formatters/php_formatter.py +301 -0
  49. tree_sitter_analyzer/formatters/python_formatter.py +830 -0
  50. tree_sitter_analyzer/formatters/ruby_formatter.py +278 -0
  51. tree_sitter_analyzer/formatters/rust_formatter.py +233 -0
  52. tree_sitter_analyzer/formatters/sql_formatter_wrapper.py +689 -0
  53. tree_sitter_analyzer/formatters/sql_formatters.py +536 -0
  54. tree_sitter_analyzer/formatters/typescript_formatter.py +543 -0
  55. tree_sitter_analyzer/formatters/yaml_formatter.py +462 -0
  56. tree_sitter_analyzer/interfaces/__init__.py +9 -0
  57. tree_sitter_analyzer/interfaces/cli.py +535 -0
  58. tree_sitter_analyzer/interfaces/cli_adapter.py +359 -0
  59. tree_sitter_analyzer/interfaces/mcp_adapter.py +224 -0
  60. tree_sitter_analyzer/interfaces/mcp_server.py +428 -0
  61. tree_sitter_analyzer/language_detector.py +553 -0
  62. tree_sitter_analyzer/language_loader.py +271 -0
  63. tree_sitter_analyzer/languages/__init__.py +10 -0
  64. tree_sitter_analyzer/languages/csharp_plugin.py +1076 -0
  65. tree_sitter_analyzer/languages/css_plugin.py +449 -0
  66. tree_sitter_analyzer/languages/go_plugin.py +836 -0
  67. tree_sitter_analyzer/languages/html_plugin.py +496 -0
  68. tree_sitter_analyzer/languages/java_plugin.py +1299 -0
  69. tree_sitter_analyzer/languages/javascript_plugin.py +1622 -0
  70. tree_sitter_analyzer/languages/kotlin_plugin.py +656 -0
  71. tree_sitter_analyzer/languages/markdown_plugin.py +1928 -0
  72. tree_sitter_analyzer/languages/php_plugin.py +862 -0
  73. tree_sitter_analyzer/languages/python_plugin.py +1636 -0
  74. tree_sitter_analyzer/languages/ruby_plugin.py +757 -0
  75. tree_sitter_analyzer/languages/rust_plugin.py +673 -0
  76. tree_sitter_analyzer/languages/sql_plugin.py +2444 -0
  77. tree_sitter_analyzer/languages/typescript_plugin.py +1892 -0
  78. tree_sitter_analyzer/languages/yaml_plugin.py +695 -0
  79. tree_sitter_analyzer/legacy_table_formatter.py +860 -0
  80. tree_sitter_analyzer/mcp/__init__.py +34 -0
  81. tree_sitter_analyzer/mcp/resources/__init__.py +43 -0
  82. tree_sitter_analyzer/mcp/resources/code_file_resource.py +208 -0
  83. tree_sitter_analyzer/mcp/resources/project_stats_resource.py +586 -0
  84. tree_sitter_analyzer/mcp/server.py +869 -0
  85. tree_sitter_analyzer/mcp/tools/__init__.py +28 -0
  86. tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py +779 -0
  87. tree_sitter_analyzer/mcp/tools/analyze_scale_tool_cli_compatible.py +291 -0
  88. tree_sitter_analyzer/mcp/tools/base_tool.py +139 -0
  89. tree_sitter_analyzer/mcp/tools/fd_rg_utils.py +816 -0
  90. tree_sitter_analyzer/mcp/tools/find_and_grep_tool.py +686 -0
  91. tree_sitter_analyzer/mcp/tools/list_files_tool.py +413 -0
  92. tree_sitter_analyzer/mcp/tools/output_format_validator.py +148 -0
  93. tree_sitter_analyzer/mcp/tools/query_tool.py +443 -0
  94. tree_sitter_analyzer/mcp/tools/read_partial_tool.py +464 -0
  95. tree_sitter_analyzer/mcp/tools/search_content_tool.py +836 -0
  96. tree_sitter_analyzer/mcp/tools/table_format_tool.py +572 -0
  97. tree_sitter_analyzer/mcp/tools/universal_analyze_tool.py +653 -0
  98. tree_sitter_analyzer/mcp/utils/__init__.py +113 -0
  99. tree_sitter_analyzer/mcp/utils/error_handler.py +569 -0
  100. tree_sitter_analyzer/mcp/utils/file_output_factory.py +217 -0
  101. tree_sitter_analyzer/mcp/utils/file_output_manager.py +322 -0
  102. tree_sitter_analyzer/mcp/utils/gitignore_detector.py +358 -0
  103. tree_sitter_analyzer/mcp/utils/path_resolver.py +414 -0
  104. tree_sitter_analyzer/mcp/utils/search_cache.py +343 -0
  105. tree_sitter_analyzer/models.py +840 -0
  106. tree_sitter_analyzer/mypy_current_errors.txt +2 -0
  107. tree_sitter_analyzer/output_manager.py +255 -0
  108. tree_sitter_analyzer/platform_compat/__init__.py +3 -0
  109. tree_sitter_analyzer/platform_compat/adapter.py +324 -0
  110. tree_sitter_analyzer/platform_compat/compare.py +224 -0
  111. tree_sitter_analyzer/platform_compat/detector.py +67 -0
  112. tree_sitter_analyzer/platform_compat/fixtures.py +228 -0
  113. tree_sitter_analyzer/platform_compat/profiles.py +217 -0
  114. tree_sitter_analyzer/platform_compat/record.py +55 -0
  115. tree_sitter_analyzer/platform_compat/recorder.py +155 -0
  116. tree_sitter_analyzer/platform_compat/report.py +92 -0
  117. tree_sitter_analyzer/plugins/__init__.py +280 -0
  118. tree_sitter_analyzer/plugins/base.py +647 -0
  119. tree_sitter_analyzer/plugins/manager.py +384 -0
  120. tree_sitter_analyzer/project_detector.py +328 -0
  121. tree_sitter_analyzer/queries/__init__.py +27 -0
  122. tree_sitter_analyzer/queries/csharp.py +216 -0
  123. tree_sitter_analyzer/queries/css.py +615 -0
  124. tree_sitter_analyzer/queries/go.py +275 -0
  125. tree_sitter_analyzer/queries/html.py +543 -0
  126. tree_sitter_analyzer/queries/java.py +402 -0
  127. tree_sitter_analyzer/queries/javascript.py +724 -0
  128. tree_sitter_analyzer/queries/kotlin.py +192 -0
  129. tree_sitter_analyzer/queries/markdown.py +258 -0
  130. tree_sitter_analyzer/queries/php.py +95 -0
  131. tree_sitter_analyzer/queries/python.py +859 -0
  132. tree_sitter_analyzer/queries/ruby.py +92 -0
  133. tree_sitter_analyzer/queries/rust.py +223 -0
  134. tree_sitter_analyzer/queries/sql.py +555 -0
  135. tree_sitter_analyzer/queries/typescript.py +871 -0
  136. tree_sitter_analyzer/queries/yaml.py +236 -0
  137. tree_sitter_analyzer/query_loader.py +272 -0
  138. tree_sitter_analyzer/security/__init__.py +22 -0
  139. tree_sitter_analyzer/security/boundary_manager.py +277 -0
  140. tree_sitter_analyzer/security/regex_checker.py +297 -0
  141. tree_sitter_analyzer/security/validator.py +599 -0
  142. tree_sitter_analyzer/table_formatter.py +782 -0
  143. tree_sitter_analyzer/utils/__init__.py +53 -0
  144. tree_sitter_analyzer/utils/logging.py +433 -0
  145. tree_sitter_analyzer/utils/tree_sitter_compat.py +289 -0
  146. tree_sitter_analyzer-1.9.17.1.dist-info/METADATA +485 -0
  147. tree_sitter_analyzer-1.9.17.1.dist-info/RECORD +149 -0
  148. tree_sitter_analyzer-1.9.17.1.dist-info/WHEEL +4 -0
  149. tree_sitter_analyzer-1.9.17.1.dist-info/entry_points.txt +25 -0
tree_sitter_analyzer/mcp/tools/universal_analyze_tool.py
@@ -0,0 +1,653 @@
#!/usr/bin/env python3
"""
Universal Analyze Tool for MCP

This tool provides universal code analysis capabilities through the MCP protocol,
supporting multiple programming languages with automatic language detection.
"""

from pathlib import Path
from typing import Any

from ...constants import (
    ELEMENT_TYPE_CLASS,
    ELEMENT_TYPE_FUNCTION,
    ELEMENT_TYPE_IMPORT,
    ELEMENT_TYPE_PACKAGE,
    ELEMENT_TYPE_VARIABLE,
    is_element_of_type,
)
from ...core.analysis_engine import AnalysisRequest, get_analysis_engine
from ...language_detector import detect_language_from_file, is_language_supported
from ...mcp.utils import get_performance_monitor
from ...utils import setup_logger
from ..utils.error_handler import handle_mcp_errors
from .base_tool import BaseMCPTool

# Set up logging
logger = setup_logger(__name__)


class UniversalAnalyzeTool(BaseMCPTool):
    """
    Universal MCP Tool for code analysis across multiple languages.

    This tool provides comprehensive code analysis capabilities through the MCP protocol,
    supporting both basic and detailed analysis with language-specific optimizations.
    """

    def __init__(self, project_root: str | None = None) -> None:
        """Initialize the universal analyze tool."""
        super().__init__(project_root)
        self.analysis_engine = get_analysis_engine(project_root)
        logger.info("UniversalAnalyzeTool initialized with security validation")

    def set_project_path(self, project_path: str) -> None:
        """
        Update the project path for all components.

        Args:
            project_path: New project root directory
        """
        super().set_project_path(project_path)
        self.analysis_engine = get_analysis_engine(project_path)
        logger.info(f"UniversalAnalyzeTool project path updated to: {project_path}")

    def get_tool_definition(self) -> dict[str, Any]:
        """
        Get MCP tool definition for universal code analysis

        Returns:
            Tool definition dictionary
        """
        return {
            "name": "analyze_code_universal",
            "description": "Universal code analysis for multiple programming languages with automatic language detection",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "file_path": {
                        "type": "string",
                        "description": "Path to the code file to analyze",
                    },
                    "language": {
                        "type": "string",
                        "description": "Programming language (optional, auto-detected if not specified)",
                    },
                    "analysis_type": {
                        "type": "string",
                        "enum": ["basic", "detailed", "structure", "metrics"],
                        "description": "Type of analysis to perform",
                        "default": "basic",
                    },
                    "include_ast": {
                        "type": "boolean",
                        "description": "Include AST information in the analysis",
                        "default": False,
                    },
                    "include_queries": {
                        "type": "boolean",
                        "description": "Include available query information",
                        "default": False,
                    },
                },
                "required": ["file_path"],
                "additionalProperties": False,
            },
        }

    @handle_mcp_errors("universal_analyze")
    async def execute(self, arguments: dict[str, Any]) -> dict[str, Any]:
        """
        Execute universal code analysis

        Args:
            arguments: Tool arguments containing file_path and optional parameters

        Returns:
            Dictionary containing analysis results

        Raises:
            ValueError: If required arguments are missing or invalid
            FileNotFoundError: If the specified file doesn't exist
        """
        # Validate required arguments
        if "file_path" not in arguments:
            raise ValueError("file_path is required")

        file_path = arguments["file_path"]
        language = arguments.get("language")
        analysis_type = arguments.get("analysis_type", "basic")

        # Resolve file path to absolute path
        resolved_file_path = self.path_resolver.resolve(file_path)
        logger.info(f"Analyzing file: {file_path} (resolved to: {resolved_file_path})")

        # Security validation using resolved path
        is_valid, error_msg = self.security_validator.validate_file_path(
            resolved_file_path
        )
        if not is_valid:
            logger.warning(
                f"Security validation failed for file path: {resolved_file_path} - {error_msg}"
            )
            raise ValueError(f"Invalid file path: {error_msg}")

        # Sanitize inputs
        if language:
            language = self.security_validator.sanitize_input(language, max_length=50)
        if analysis_type:
            analysis_type = self.security_validator.sanitize_input(
                analysis_type, max_length=50
            )
        include_ast = arguments.get("include_ast", False)
        include_queries = arguments.get("include_queries", False)

        # Validate file exists
        if not Path(resolved_file_path).exists():
            raise ValueError("Invalid file path: file does not exist")

        # Detect language if not specified
        if not language:
            language = detect_language_from_file(resolved_file_path)
            if language == "unknown":
                raise ValueError(
                    f"Could not detect language for file: {resolved_file_path}"
                )

        # Check if language is supported
        if not is_language_supported(language):
            raise ValueError(f"Language '{language}' is not supported by tree-sitter")

        # Validate analysis_type
        valid_analysis_types = ["basic", "detailed", "structure", "metrics"]
        if analysis_type not in valid_analysis_types:
            raise ValueError(
                f"Invalid analysis_type '{analysis_type}'. Valid types: {', '.join(valid_analysis_types)}"
            )

        logger.info(
            f"Analyzing {resolved_file_path} (language: {language}, type: {analysis_type})"
        )

        try:
            monitor = get_performance_monitor()
            with monitor.measure_operation("universal_analyze"):
                # Get appropriate analyzer
                if language == "java":
                    # Use advanced analyzer for Java
                    result = await self._analyze_with_advanced_analyzer(
                        resolved_file_path, language, analysis_type, include_ast
                    )
                else:
                    # Use universal analyzer for other languages
                    result = await self._analyze_with_universal_analyzer(
                        resolved_file_path, language, analysis_type, include_ast
                    )

                # Add query information if requested
                if include_queries:
                    result["available_queries"] = await self._get_available_queries(
                        language
                    )

                logger.info(f"Successfully analyzed {resolved_file_path}")
                return result

        except Exception as e:
            logger.error(f"Error analyzing {resolved_file_path}: {e}")
            raise

    async def _analyze_with_advanced_analyzer(
        self, file_path: str, language: str, analysis_type: str, include_ast: bool
    ) -> dict[str, Any]:
        """
        Analyze using the advanced analyzer (Java-specific)

        Args:
            file_path: Path to the file to analyze
            language: Programming language
            analysis_type: Type of analysis to perform
            include_ast: Whether to include AST information

        Returns:
            Analysis results dictionary
        """
        # Use unified analysis engine instead of deprecated advanced_analyzer
        request = AnalysisRequest(
            file_path=file_path,
            language=language,
            include_complexity=True,
            include_details=True,
        )
        analysis_result = await self.analysis_engine.analyze(request)

        if analysis_result is None:
            raise RuntimeError(f"Failed to analyze file: {file_path}")

        # Build base result
        result: dict[str, Any] = {
            "file_path": file_path,
            "language": language,
            "analyzer_type": "advanced",
            "analysis_type": analysis_type,
        }

        if analysis_type == "basic":
            result.update(self._extract_basic_metrics(analysis_result))
        elif analysis_type == "detailed":
            result.update(self._extract_detailed_metrics(analysis_result))
        elif analysis_type == "structure":
            result.update(self._extract_structure_info(analysis_result))
        elif analysis_type == "metrics":
            result.update(self._extract_comprehensive_metrics(analysis_result))

        if include_ast:
            result["ast_info"] = {
                "node_count": getattr(
                    analysis_result, "line_count", 0
                ),  # Approximation
                "depth": 0,  # Advanced analyzer doesn't provide this, use 0 instead of string
            }

        return result

    async def _analyze_with_universal_analyzer(
        self, file_path: str, language: str, analysis_type: str, include_ast: bool
    ) -> dict[str, Any]:
        """
        Analyze using the universal analyzer

        Args:
            file_path: Path to the file to analyze
            language: Programming language
            analysis_type: Type of analysis to perform
            include_ast: Whether to include AST information

        Returns:
            Analysis results dictionary
        """
        request = AnalysisRequest(
            file_path=file_path,
            language=language,
            include_details=(analysis_type == "detailed"),
        )
        analysis_result = await self.analysis_engine.analyze(request)

        if not analysis_result or not analysis_result.success:
            error_message = (
                analysis_result.error_message if analysis_result else "Unknown error"
            )
            raise RuntimeError(f"Failed to analyze file: {file_path} - {error_message}")

        # Convert AnalysisResult to dictionary for consistent processing
        analysis_dict = analysis_result.to_dict()

        # Build base result
        result: dict[str, Any] = {
            "file_path": file_path,
            "language": language,
            "analyzer_type": "universal",
            "analysis_type": analysis_type,
        }

        if analysis_type == "basic":
            result.update(self._extract_universal_basic_metrics(analysis_dict))
        elif analysis_type == "detailed":
            result.update(self._extract_universal_detailed_metrics(analysis_dict))
        elif analysis_type == "structure":
            result.update(self._extract_universal_structure_info(analysis_dict))
        elif analysis_type == "metrics":
            result.update(self._extract_universal_comprehensive_metrics(analysis_dict))

        if include_ast:
            result["ast_info"] = analysis_dict.get("ast_info", {})

        return result

    def _extract_basic_metrics(self, analysis_result: Any) -> dict[str, Any]:
        """Extract basic metrics from advanced analyzer result"""
        stats = analysis_result.get_statistics()

        return {
            "metrics": {
                "lines_total": analysis_result.line_count,
                "lines_code": stats.get("lines_of_code", 0),
                "lines_comment": stats.get("comment_lines", 0),
                "lines_blank": stats.get("blank_lines", 0),
                "elements": {
                    "classes": len(
                        [
                            e
                            for e in analysis_result.elements
                            if is_element_of_type(e, ELEMENT_TYPE_CLASS)
                        ]
                    ),
                    "methods": len(
                        [
                            e
                            for e in analysis_result.elements
                            if is_element_of_type(e, ELEMENT_TYPE_FUNCTION)
                        ]
                    ),
                    "fields": len(
                        [
                            e
                            for e in analysis_result.elements
                            if is_element_of_type(e, ELEMENT_TYPE_VARIABLE)
                        ]
                    ),
                    "imports": len(
                        [
                            e
                            for e in analysis_result.elements
                            if is_element_of_type(e, ELEMENT_TYPE_IMPORT)
                        ]
                    ),
                    "annotations": len(getattr(analysis_result, "annotations", [])),
                    "packages": len(
                        [
                            e
                            for e in analysis_result.elements
                            if is_element_of_type(e, ELEMENT_TYPE_PACKAGE)
                        ]
                    ),
                    "total": (
                        len(
                            [
                                e
                                for e in analysis_result.elements
                                if is_element_of_type(e, ELEMENT_TYPE_CLASS)
                            ]
                        )
                        + len(
                            [
                                e
                                for e in analysis_result.elements
                                if is_element_of_type(e, ELEMENT_TYPE_FUNCTION)
                            ]
                        )
                        + len(
                            [
                                e
                                for e in analysis_result.elements
                                if is_element_of_type(e, ELEMENT_TYPE_VARIABLE)
                            ]
                        )
                        + len(
                            [
                                e
                                for e in analysis_result.elements
                                if is_element_of_type(e, ELEMENT_TYPE_IMPORT)
                            ]
                        )
                        + len(
                            [
                                e
                                for e in analysis_result.elements
                                if is_element_of_type(e, ELEMENT_TYPE_PACKAGE)
                            ]
                        )
                    ),
                },
            }
        }

    def _extract_detailed_metrics(self, analysis_result: Any) -> dict[str, Any]:
        """Extract detailed metrics from advanced analyzer result"""
        basic = self._extract_basic_metrics(analysis_result)

        # Add complexity metrics
        methods = [
            e
            for e in analysis_result.elements
            if is_element_of_type(e, ELEMENT_TYPE_FUNCTION)
        ]
        total_complexity = sum(
            getattr(method, "complexity_score", 0) or 0 for method in methods
        )

        basic["metrics"]["complexity"] = {
            "total": total_complexity,
            "average": round(total_complexity / len(methods) if methods else 0, 2),
            "max": max(
                (getattr(method, "complexity_score", 0) or 0 for method in methods),
                default=0,
            ),
        }

        return basic

    def _extract_structure_info(self, analysis_result: Any) -> dict[str, Any]:
        """Extract structure information from advanced analyzer result"""
        return {
            "structure": {
                "package": (
                    analysis_result.package.name if analysis_result.package else None
                ),
                "classes": [
                    (
                        cls.to_summary_item()
                        if hasattr(cls, "to_summary_item")
                        else {"name": getattr(cls, "name", "unknown")}
                    )
                    for cls in [
                        e
                        for e in analysis_result.elements
                        if is_element_of_type(e, ELEMENT_TYPE_CLASS)
                    ]
                ],
                "methods": [
                    (
                        method.to_summary_item()
                        if hasattr(method, "to_summary_item")
                        else {"name": getattr(method, "name", "unknown")}
                    )
                    for method in [
                        e
                        for e in analysis_result.elements
                        if is_element_of_type(e, ELEMENT_TYPE_FUNCTION)
                    ]
                ],
                "fields": [
                    (
                        field.to_summary_item()
                        if hasattr(field, "to_summary_item")
                        else {"name": getattr(field, "name", "unknown")}
                    )
                    for field in [
                        e
                        for e in analysis_result.elements
                        if is_element_of_type(e, ELEMENT_TYPE_VARIABLE)
                    ]
                ],
                "imports": [
                    (
                        imp.to_summary_item()
                        if hasattr(imp, "to_summary_item")
                        else {"name": getattr(imp, "name", "unknown")}
                    )
                    for imp in [
                        e
                        for e in analysis_result.elements
                        if is_element_of_type(e, ELEMENT_TYPE_IMPORT)
                    ]
                ],
                "annotations": [
                    (
                        ann.to_summary_item()
                        if hasattr(ann, "to_summary_item")
                        else {"name": getattr(ann, "name", "unknown")}
                    )
                    for ann in getattr(analysis_result, "annotations", [])
                ],
            }
        }

    def _extract_comprehensive_metrics(self, analysis_result: Any) -> dict[str, Any]:
        """Extract comprehensive metrics from advanced analyzer result"""
        detailed = self._extract_detailed_metrics(analysis_result)
        structure = self._extract_structure_info(analysis_result)

        # Combine both
        result = detailed.copy()
        result.update(structure)

        return result

    def _extract_universal_basic_metrics(
        self, analysis_result: dict[str, Any]
    ) -> dict[str, Any]:
        """Extract basic metrics from universal analyzer result"""
        elements = analysis_result.get("elements", [])
        return {
            "metrics": {
                "lines_total": analysis_result.get("line_count", 0),
                "lines_code": analysis_result.get("line_count", 0),  # Approximation
                "lines_comment": 0,  # Not available in universal analyzer
                "lines_blank": 0,  # Not available in universal analyzer
                "elements": {
                    "classes": len(
                        [
                            e
                            for e in elements
                            if hasattr(e, "element_type") and e.element_type == "class"
                        ]
                    ),
                    "methods": len(
                        [
                            e
                            for e in elements
                            if hasattr(e, "element_type")
                            and e.element_type == "function"
                        ]
                    ),
                    "fields": len(
                        [
                            e
                            for e in elements
                            if hasattr(e, "element_type")
                            and e.element_type == "variable"
                        ]
                    ),
                    "imports": len(
                        [
                            e
                            for e in elements
                            if hasattr(e, "element_type") and e.element_type == "import"
                        ]
                    ),
                    "annotations": 0,  # Not available in universal analyzer
                },
            }
        }

    def _extract_universal_detailed_metrics(
        self, analysis_result: dict[str, Any]
    ) -> dict[str, Any]:
        """Extract detailed metrics from universal analyzer result"""
        basic = self._extract_universal_basic_metrics(analysis_result)

        # Add query results if available
        if "query_results" in analysis_result:
            basic["query_results"] = analysis_result["query_results"]

        return basic

    def _extract_universal_structure_info(
        self, analysis_result: dict[str, Any]
    ) -> dict[str, Any]:
        """Extract structure information from universal analyzer result"""
        return {
            "structure": analysis_result.get("structure", {}),
            "queries_executed": analysis_result.get("queries_executed", []),
        }

    def _extract_universal_comprehensive_metrics(
        self, analysis_result: dict[str, Any]
    ) -> dict[str, Any]:
        """Extract comprehensive metrics from universal analyzer result"""
        detailed = self._extract_universal_detailed_metrics(analysis_result)
        structure = self._extract_universal_structure_info(analysis_result)

        # Combine both
        result = detailed.copy()
        result.update(structure)

        return result

    async def _get_available_queries(self, language: str) -> dict[str, Any]:
        """
        Get available queries for the specified language

        Args:
            language: Programming language

        Returns:
            Dictionary containing available queries information
        """
        try:
            if language == "java":
                # For Java, we don't have predefined queries in the advanced analyzer
                return {
                    "language": language,
                    "queries": [],
                    "note": "Advanced analyzer uses built-in analysis logic",
                }
            else:
                # For other languages, get from universal analyzer
                queries = self.analysis_engine.get_supported_languages()
                return {"language": language, "queries": queries, "count": len(queries)}
        except Exception as e:
            logger.warning(f"Failed to get queries for {language}: {e}")
            return {"language": language, "queries": [], "error": str(e)}

    def validate_arguments(self, arguments: dict[str, Any]) -> bool:
        """
        Validate tool arguments against the schema.

        Args:
            arguments: Arguments to validate

        Returns:
            True if arguments are valid

        Raises:
            ValueError: If arguments are invalid
        """
        # Check required fields
        if "file_path" not in arguments:
            raise ValueError("Required field 'file_path' is missing")

        # Validate file_path
        file_path = arguments["file_path"]
        if not isinstance(file_path, str):
            raise ValueError("file_path must be a string")
        if not file_path.strip():
            raise ValueError("file_path cannot be empty")

        # Validate optional fields
        if "language" in arguments:
            language = arguments["language"]
            if not isinstance(language, str):
                raise ValueError("language must be a string")

        if "analysis_type" in arguments:
            analysis_type = arguments["analysis_type"]
            if not isinstance(analysis_type, str):
                raise ValueError("analysis_type must be a string")
            valid_types = ["basic", "detailed", "structure", "metrics"]
            if analysis_type not in valid_types:
                raise ValueError(f"analysis_type must be one of {valid_types}")

        if "include_ast" in arguments:
            include_ast = arguments["include_ast"]
            if not isinstance(include_ast, bool):
                raise ValueError("include_ast must be a boolean")

        if "include_queries" in arguments:
            include_queries = arguments["include_queries"]
            if not isinstance(include_queries, bool):
                raise ValueError("include_queries must be a boolean")

        return True
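
For reference, a minimal usage sketch of the tool shipped above (not part of the package itself): the class name, method names, and argument keys come from the file shown, while the project root, target file path, and asyncio wiring are placeholder assumptions.

import asyncio

from tree_sitter_analyzer.mcp.tools.universal_analyze_tool import UniversalAnalyzeTool


async def main() -> None:
    # Hypothetical project root and target file; adjust to a real checkout.
    tool = UniversalAnalyzeTool(project_root="/path/to/project")
    arguments = {
        "file_path": "src/example.py",      # required by the inputSchema
        "analysis_type": "structure",       # one of: basic, detailed, structure, metrics
        "include_queries": True,
    }
    tool.validate_arguments(arguments)      # raises ValueError on invalid input
    result = await tool.execute(arguments)  # returns a dict of analysis results
    print(sorted(result.keys()))


asyncio.run(main())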