tree-sitter-analyzer 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tree-sitter-analyzer might be problematic; see the advisory details for more information.

Files changed (78)
  1. tree_sitter_analyzer/__init__.py +121 -0
  2. tree_sitter_analyzer/__main__.py +12 -0
  3. tree_sitter_analyzer/api.py +539 -0
  4. tree_sitter_analyzer/cli/__init__.py +39 -0
  5. tree_sitter_analyzer/cli/__main__.py +13 -0
  6. tree_sitter_analyzer/cli/commands/__init__.py +27 -0
  7. tree_sitter_analyzer/cli/commands/advanced_command.py +88 -0
  8. tree_sitter_analyzer/cli/commands/base_command.py +155 -0
  9. tree_sitter_analyzer/cli/commands/default_command.py +19 -0
  10. tree_sitter_analyzer/cli/commands/partial_read_command.py +133 -0
  11. tree_sitter_analyzer/cli/commands/query_command.py +82 -0
  12. tree_sitter_analyzer/cli/commands/structure_command.py +121 -0
  13. tree_sitter_analyzer/cli/commands/summary_command.py +93 -0
  14. tree_sitter_analyzer/cli/commands/table_command.py +233 -0
  15. tree_sitter_analyzer/cli/info_commands.py +121 -0
  16. tree_sitter_analyzer/cli_main.py +276 -0
  17. tree_sitter_analyzer/core/__init__.py +20 -0
  18. tree_sitter_analyzer/core/analysis_engine.py +574 -0
  19. tree_sitter_analyzer/core/cache_service.py +330 -0
  20. tree_sitter_analyzer/core/engine.py +560 -0
  21. tree_sitter_analyzer/core/parser.py +288 -0
  22. tree_sitter_analyzer/core/query.py +502 -0
  23. tree_sitter_analyzer/encoding_utils.py +460 -0
  24. tree_sitter_analyzer/exceptions.py +340 -0
  25. tree_sitter_analyzer/file_handler.py +222 -0
  26. tree_sitter_analyzer/formatters/__init__.py +1 -0
  27. tree_sitter_analyzer/formatters/base_formatter.py +168 -0
  28. tree_sitter_analyzer/formatters/formatter_factory.py +74 -0
  29. tree_sitter_analyzer/formatters/java_formatter.py +270 -0
  30. tree_sitter_analyzer/formatters/python_formatter.py +235 -0
  31. tree_sitter_analyzer/interfaces/__init__.py +10 -0
  32. tree_sitter_analyzer/interfaces/cli.py +557 -0
  33. tree_sitter_analyzer/interfaces/cli_adapter.py +319 -0
  34. tree_sitter_analyzer/interfaces/mcp_adapter.py +170 -0
  35. tree_sitter_analyzer/interfaces/mcp_server.py +416 -0
  36. tree_sitter_analyzer/java_analyzer.py +219 -0
  37. tree_sitter_analyzer/language_detector.py +400 -0
  38. tree_sitter_analyzer/language_loader.py +228 -0
  39. tree_sitter_analyzer/languages/__init__.py +11 -0
  40. tree_sitter_analyzer/languages/java_plugin.py +1113 -0
  41. tree_sitter_analyzer/languages/python_plugin.py +712 -0
  42. tree_sitter_analyzer/mcp/__init__.py +32 -0
  43. tree_sitter_analyzer/mcp/resources/__init__.py +47 -0
  44. tree_sitter_analyzer/mcp/resources/code_file_resource.py +213 -0
  45. tree_sitter_analyzer/mcp/resources/project_stats_resource.py +550 -0
  46. tree_sitter_analyzer/mcp/server.py +319 -0
  47. tree_sitter_analyzer/mcp/tools/__init__.py +36 -0
  48. tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py +558 -0
  49. tree_sitter_analyzer/mcp/tools/analyze_scale_tool_cli_compatible.py +245 -0
  50. tree_sitter_analyzer/mcp/tools/base_tool.py +55 -0
  51. tree_sitter_analyzer/mcp/tools/get_positions_tool.py +448 -0
  52. tree_sitter_analyzer/mcp/tools/read_partial_tool.py +302 -0
  53. tree_sitter_analyzer/mcp/tools/table_format_tool.py +359 -0
  54. tree_sitter_analyzer/mcp/tools/universal_analyze_tool.py +476 -0
  55. tree_sitter_analyzer/mcp/utils/__init__.py +106 -0
  56. tree_sitter_analyzer/mcp/utils/error_handler.py +549 -0
  57. tree_sitter_analyzer/models.py +481 -0
  58. tree_sitter_analyzer/output_manager.py +264 -0
  59. tree_sitter_analyzer/plugins/__init__.py +334 -0
  60. tree_sitter_analyzer/plugins/base.py +446 -0
  61. tree_sitter_analyzer/plugins/java_plugin.py +625 -0
  62. tree_sitter_analyzer/plugins/javascript_plugin.py +439 -0
  63. tree_sitter_analyzer/plugins/manager.py +355 -0
  64. tree_sitter_analyzer/plugins/plugin_loader.py +83 -0
  65. tree_sitter_analyzer/plugins/python_plugin.py +598 -0
  66. tree_sitter_analyzer/plugins/registry.py +366 -0
  67. tree_sitter_analyzer/queries/__init__.py +27 -0
  68. tree_sitter_analyzer/queries/java.py +394 -0
  69. tree_sitter_analyzer/queries/javascript.py +149 -0
  70. tree_sitter_analyzer/queries/python.py +286 -0
  71. tree_sitter_analyzer/queries/typescript.py +230 -0
  72. tree_sitter_analyzer/query_loader.py +260 -0
  73. tree_sitter_analyzer/table_formatter.py +448 -0
  74. tree_sitter_analyzer/utils.py +201 -0
  75. tree_sitter_analyzer-0.1.0.dist-info/METADATA +581 -0
  76. tree_sitter_analyzer-0.1.0.dist-info/RECORD +78 -0
  77. tree_sitter_analyzer-0.1.0.dist-info/WHEEL +4 -0
  78. tree_sitter_analyzer-0.1.0.dist-info/entry_points.txt +8 -0
@@ -0,0 +1,476 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ """
4
+ Universal Code Analysis Tool for MCP
5
+
6
+ This tool provides universal code analysis capabilities for multiple programming
7
+ languages using the existing language detection and analysis infrastructure.
8
+ """
9
+
10
+ import logging
11
+ from pathlib import Path
12
+ from typing import Any, Dict, List, Optional
13
+
14
+ from tree_sitter_analyzer.core.analysis_engine import (
15
+ AnalysisRequest,
16
+ get_analysis_engine,
17
+ )
18
+ from ...core.analysis_engine import get_analysis_engine, AnalysisRequest
19
+ from ...language_detector import detect_language_from_file, is_language_supported
20
+ from ..utils.error_handler import handle_mcp_errors
21
+ from ..utils import get_performance_monitor
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
class UniversalAnalyzeTool:
    """
    Universal code analysis tool for multiple programming languages.

    This tool automatically detects the programming language and applies
    the appropriate analyzer (the unified analysis engine) to provide
    basic, detailed, structure, or metrics-oriented code analysis.
    """

    # Single source of truth for the analysis types this tool accepts;
    # shared by execute() and validate_arguments() so they cannot drift.
    _VALID_ANALYSIS_TYPES = ["basic", "detailed", "structure", "metrics"]

    def __init__(self) -> None:
        """Initialize the universal analysis tool"""
        # Use unified analysis engine instead of deprecated AdvancedAnalyzer
        self.analysis_engine = get_analysis_engine()

    def get_tool_definition(self) -> Dict[str, Any]:
        """
        Get MCP tool definition for universal code analysis

        Returns:
            Tool definition dictionary
        """
        return {
            "name": "analyze_code_universal",
            "description": "Universal code analysis for multiple programming languages with automatic language detection",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "file_path": {
                        "type": "string",
                        "description": "Path to the code file to analyze",
                    },
                    "language": {
                        "type": "string",
                        "description": "Programming language (optional, auto-detected if not specified)",
                    },
                    "analysis_type": {
                        "type": "string",
                        "enum": ["basic", "detailed", "structure", "metrics"],
                        "description": "Type of analysis to perform",
                        "default": "basic",
                    },
                    "include_ast": {
                        "type": "boolean",
                        "description": "Include AST information in the analysis",
                        "default": False,
                    },
                    "include_queries": {
                        "type": "boolean",
                        "description": "Include available query information",
                        "default": False,
                    },
                },
                "required": ["file_path"],
                "additionalProperties": False,
            },
        }

    @handle_mcp_errors("universal_analyze")
    async def execute(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute universal code analysis

        Args:
            arguments: Tool arguments containing file_path and optional parameters

        Returns:
            Dictionary containing analysis results

        Raises:
            ValueError: If required arguments are missing or invalid
            FileNotFoundError: If the specified file doesn't exist
        """
        # Validate required arguments
        if "file_path" not in arguments:
            raise ValueError("file_path is required")

        file_path = arguments["file_path"]
        language = arguments.get("language")
        analysis_type = arguments.get("analysis_type", "basic")
        include_ast = arguments.get("include_ast", False)
        include_queries = arguments.get("include_queries", False)

        # Validate file exists
        if not Path(file_path).exists():
            raise FileNotFoundError(f"File not found: {file_path}")

        # Detect language if not specified
        if not language:
            language = detect_language_from_file(file_path)
            if language == "unknown":
                raise ValueError(f"Could not detect language for file: {file_path}")

        # Check if language is supported
        if not is_language_supported(language):
            raise ValueError(f"Language '{language}' is not supported by tree-sitter")

        # Validate analysis_type against the shared constant
        if analysis_type not in self._VALID_ANALYSIS_TYPES:
            raise ValueError(
                f"Invalid analysis_type '{analysis_type}'. Valid types: {', '.join(self._VALID_ANALYSIS_TYPES)}"
            )

        logger.info(
            f"Analyzing {file_path} (language: {language}, type: {analysis_type})"
        )

        try:
            from contextlib import nullcontext

            # get_performance_monitor() falls back to None when the unified
            # core services cannot be imported (see mcp.utils), so only wrap
            # the analysis in a measurement context when one is available.
            monitor = get_performance_monitor()
            measure = (
                monitor.measure_operation("universal_analyze")
                if monitor is not None
                else nullcontext()
            )
            with measure:
                # Get appropriate analyzer
                if language == "java":
                    # Use advanced analyzer for Java
                    result = await self._analyze_with_advanced_analyzer(
                        file_path, language, analysis_type, include_ast
                    )
                else:
                    # Use universal analyzer for other languages
                    result = await self._analyze_with_universal_analyzer(
                        file_path, language, analysis_type, include_ast
                    )

                # Add query information if requested
                if include_queries:
                    result["available_queries"] = await self._get_available_queries(
                        language
                    )

                logger.info(f"Successfully analyzed {file_path}")
                return result

        except Exception as e:
            logger.error(f"Error analyzing {file_path}: {e}")
            raise

    @staticmethod
    def _elements_of_type(analysis_result: Any, type_name: str) -> List[Any]:
        """Return all analyzed elements whose class name equals *type_name*."""
        return [
            e for e in analysis_result.elements if e.__class__.__name__ == type_name
        ]

    @staticmethod
    def _summary_item(element: Any) -> Dict[str, Any]:
        """Summarize an element via to_summary_item() when available,
        otherwise fall back to a minimal {'name': ...} mapping."""
        if hasattr(element, "to_summary_item"):
            return element.to_summary_item()
        return {"name": getattr(element, "name", "unknown")}

    async def _analyze_with_advanced_analyzer(
        self, file_path: str, language: str, analysis_type: str, include_ast: bool
    ) -> Dict[str, Any]:
        """
        Analyze using the advanced analyzer (Java-specific)

        Args:
            file_path: Path to the file to analyze
            language: Programming language
            analysis_type: Type of analysis to perform
            include_ast: Whether to include AST information

        Returns:
            Analysis results dictionary
        """
        # Use unified analysis engine instead of deprecated advanced_analyzer
        request = AnalysisRequest(
            file_path=file_path,
            language=language,
            include_complexity=True,
            include_details=True,
        )
        analysis_result = await self.analysis_engine.analyze(request)

        if analysis_result is None:
            raise RuntimeError(f"Failed to analyze file: {file_path}")

        # Build base result
        result: Dict[str, Any] = {
            "file_path": file_path,
            "language": language,
            "analyzer_type": "advanced",
            "analysis_type": analysis_type,
        }

        if analysis_type == "basic":
            result.update(self._extract_basic_metrics(analysis_result))
        elif analysis_type == "detailed":
            result.update(self._extract_detailed_metrics(analysis_result))
        elif analysis_type == "structure":
            result.update(self._extract_structure_info(analysis_result))
        elif analysis_type == "metrics":
            result.update(self._extract_comprehensive_metrics(analysis_result))

        if include_ast:
            result["ast_info"] = {
                "node_count": getattr(
                    analysis_result, "line_count", 0
                ),  # Approximation
                "depth": 0,  # Advanced analyzer doesn't provide this, use 0 instead of string
            }

        return result

    async def _analyze_with_universal_analyzer(
        self, file_path: str, language: str, analysis_type: str, include_ast: bool
    ) -> Dict[str, Any]:
        """
        Analyze using the universal analyzer

        Args:
            file_path: Path to the file to analyze
            language: Programming language
            analysis_type: Type of analysis to perform
            include_ast: Whether to include AST information

        Returns:
            Analysis results dictionary
        """
        request = AnalysisRequest(
            file_path=file_path,
            language=language,
            include_details=(analysis_type == "detailed"),
        )
        analysis_result = await self.analysis_engine.analyze(request)

        if not analysis_result or not analysis_result.success:
            error_message = (
                analysis_result.error_message if analysis_result else "Unknown error"
            )
            raise RuntimeError(
                f"Failed to analyze file: {file_path} - {error_message}"
            )

        # Convert AnalysisResult to dictionary for consistent processing
        analysis_dict = analysis_result.to_dict()

        # Build base result
        result: Dict[str, Any] = {
            "file_path": file_path,
            "language": language,
            "analyzer_type": "universal",
            "analysis_type": analysis_type,
        }

        if analysis_type == "basic":
            result.update(self._extract_universal_basic_metrics(analysis_dict))
        elif analysis_type == "detailed":
            result.update(self._extract_universal_detailed_metrics(analysis_dict))
        elif analysis_type == "structure":
            result.update(self._extract_universal_structure_info(analysis_dict))
        elif analysis_type == "metrics":
            result.update(
                self._extract_universal_comprehensive_metrics(analysis_dict)
            )

        if include_ast:
            result["ast_info"] = analysis_dict.get("ast_info", {})

        return result

    def _extract_basic_metrics(self, analysis_result: Any) -> Dict[str, Any]:
        """Extract basic metrics from advanced analyzer result"""
        stats = analysis_result.get_statistics()

        return {
            "metrics": {
                "lines_total": analysis_result.line_count,
                "lines_code": stats.get("lines_of_code", 0),
                "lines_comment": stats.get("comment_lines", 0),
                "lines_blank": stats.get("blank_lines", 0),
                "elements": {
                    "classes": len(self._elements_of_type(analysis_result, "Class")),
                    "methods": len(self._elements_of_type(analysis_result, "Function")),
                    "fields": len(self._elements_of_type(analysis_result, "Variable")),
                    "imports": len(self._elements_of_type(analysis_result, "Import")),
                    "annotations": len(getattr(analysis_result, "annotations", [])),
                },
            }
        }

    def _extract_detailed_metrics(self, analysis_result: Any) -> Dict[str, Any]:
        """Extract detailed metrics from advanced analyzer result"""
        basic = self._extract_basic_metrics(analysis_result)

        # Add complexity metrics; complexity_score may be missing or None,
        # both of which are treated as 0.
        methods = self._elements_of_type(analysis_result, "Function")
        total_complexity = sum(
            getattr(method, "complexity_score", 0) or 0 for method in methods
        )

        basic["metrics"]["complexity"] = {
            "total": total_complexity,
            "average": total_complexity / len(methods) if methods else 0,
            "max": max(
                (getattr(method, "complexity_score", 0) or 0 for method in methods),
                default=0,
            ),
        }

        return basic

    def _extract_structure_info(self, analysis_result: Any) -> Dict[str, Any]:
        """Extract structure information from advanced analyzer result"""
        return {
            "structure": {
                "package": (
                    analysis_result.package.name if analysis_result.package else None
                ),
                "classes": [
                    self._summary_item(cls)
                    for cls in self._elements_of_type(analysis_result, "Class")
                ],
                "methods": [
                    self._summary_item(method)
                    for method in self._elements_of_type(analysis_result, "Function")
                ],
                "fields": [
                    self._summary_item(field)
                    for field in self._elements_of_type(analysis_result, "Variable")
                ],
                "imports": [
                    self._summary_item(imp)
                    for imp in self._elements_of_type(analysis_result, "Import")
                ],
                "annotations": [
                    self._summary_item(ann)
                    for ann in getattr(analysis_result, "annotations", [])
                ],
            }
        }

    def _extract_comprehensive_metrics(self, analysis_result: Any) -> Dict[str, Any]:
        """Extract comprehensive metrics from advanced analyzer result"""
        detailed = self._extract_detailed_metrics(analysis_result)
        structure = self._extract_structure_info(analysis_result)

        # Combine both
        result = detailed.copy()
        result.update(structure)

        return result

    def _extract_universal_basic_metrics(
        self, analysis_result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Extract basic metrics from universal analyzer result"""
        elements = analysis_result.get("elements", [])

        def count(type_name: str) -> int:
            # Element dicts carry their originating class name under "__class__".
            return len([e for e in elements if e.get("__class__", "") == type_name])

        return {
            "metrics": {
                "lines_total": analysis_result.get("line_count", 0),
                "lines_code": analysis_result.get("line_count", 0),  # Approximation
                "lines_comment": 0,  # Not available in universal analyzer
                "lines_blank": 0,  # Not available in universal analyzer
                "elements": {
                    "classes": count("Class"),
                    "methods": count("Function"),
                    "fields": count("Variable"),
                    "imports": count("Import"),
                    "annotations": 0,  # Not available in universal analyzer
                },
            }
        }

    def _extract_universal_detailed_metrics(
        self, analysis_result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Extract detailed metrics from universal analyzer result"""
        basic = self._extract_universal_basic_metrics(analysis_result)

        # Add query results if available
        if "query_results" in analysis_result:
            basic["query_results"] = analysis_result["query_results"]

        return basic

    def _extract_universal_structure_info(
        self, analysis_result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Extract structure information from universal analyzer result"""
        return {
            "structure": analysis_result.get("structure", {}),
            "queries_executed": analysis_result.get("queries_executed", []),
        }

    def _extract_universal_comprehensive_metrics(
        self, analysis_result: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Extract comprehensive metrics from universal analyzer result"""
        detailed = self._extract_universal_detailed_metrics(analysis_result)
        structure = self._extract_universal_structure_info(analysis_result)

        # Combine both
        result = detailed.copy()
        result.update(structure)

        return result

    async def _get_available_queries(self, language: str) -> Dict[str, Any]:
        """
        Get available queries for the specified language

        Args:
            language: Programming language

        Returns:
            Dictionary containing available queries information
        """
        try:
            if language == "java":
                # For Java, we don't have predefined queries in the advanced analyzer
                return {
                    "language": language,
                    "queries": [],
                    "note": "Advanced analyzer uses built-in analysis logic",
                }
            else:
                # For other languages, get from universal analyzer
                queries = self.analysis_engine.get_supported_languages()
                return {"language": language, "queries": queries, "count": len(queries)}
        except Exception as e:
            logger.warning(f"Failed to get queries for {language}: {e}")
            return {"language": language, "queries": [], "error": str(e)}

    def validate_arguments(self, arguments: Dict[str, Any]) -> bool:
        """
        Validate tool arguments against the schema.

        Args:
            arguments: Arguments to validate

        Returns:
            True if arguments are valid

        Raises:
            ValueError: If arguments are invalid
        """
        # Check required fields
        if "file_path" not in arguments:
            raise ValueError("Required field 'file_path' is missing")

        # Validate file_path
        file_path = arguments["file_path"]
        if not isinstance(file_path, str):
            raise ValueError("file_path must be a string")
        if not file_path.strip():
            raise ValueError("file_path cannot be empty")

        # Validate optional fields
        if "language" in arguments:
            language = arguments["language"]
            if not isinstance(language, str):
                raise ValueError("language must be a string")

        if "analysis_type" in arguments:
            analysis_type = arguments["analysis_type"]
            if not isinstance(analysis_type, str):
                raise ValueError("analysis_type must be a string")
            if analysis_type not in self._VALID_ANALYSIS_TYPES:
                raise ValueError(
                    f"analysis_type must be one of {self._VALID_ANALYSIS_TYPES}"
                )

        if "include_ast" in arguments:
            include_ast = arguments["include_ast"]
            if not isinstance(include_ast, bool):
                raise ValueError("include_ast must be a boolean")

        if "include_queries" in arguments:
            include_queries = arguments["include_queries"]
            if not isinstance(include_queries, bool):
                raise ValueError("include_queries must be a boolean")

        return True
@@ -0,0 +1,106 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ """
4
+ MCP Utils Module
5
+
6
+ This module provides utility functions and classes for the MCP server
7
+ including error handling and other utilities.
8
+
9
+ Note: Cache and performance monitoring functionality has been moved to
10
+ the unified core services for better architecture.
11
+ """
12
+
13
+ from typing import Any, Dict
14
+
15
# Module metadata
__version__ = "2.0.0"
__author__ = "Tree-Sitter Analyzer Team"

# Capabilities advertised by the MCP utils package. The "version" entry
# mirrors the module version above so the two cannot drift apart.
MCP_UTILS_CAPABILITIES = {
    "version": __version__,
    "features": [
        "Comprehensive Error Handling",
        "Unified Core Services Integration",
    ],
    "deprecated_features": [
        "LRU Cache with TTL (moved to core.cache_service)",
        "Performance Monitoring (moved to core.analysis_engine)",
    ],
}
31
+
32
+ # Export main utility classes and functions
33
+ from .error_handler import (
34
+ AnalysisError,
35
+ ErrorCategory,
36
+ ErrorHandler,
37
+ ErrorSeverity,
38
+ FileAccessError,
39
+ MCPError,
40
+ ParsingError,
41
+ ResourceError,
42
+ ValidationError,
43
+ get_error_handler,
44
+ handle_mcp_errors,
45
+ )
46
+
47
# Backward-compatibility layer: expose the legacy cache-manager and
# performance-monitor accessors on top of the unified core services.
try:
    from ...core.analysis_engine import UnifiedAnalysisEngine
    from ...core.cache_service import CacheService as UnifiedCacheService

    class BackwardCompatibleCacheManager:
        """Thin wrapper presenting the legacy cache-manager API on top of
        the unified cache service."""

        def __init__(self):
            self._cache_service = UnifiedCacheService()

        def clear_all_caches(self):
            """Backward compatibility: clear all caches"""
            return self._cache_service.clear()

        def get_cache_stats(self):
            """Backward compatibility: get cache statistics"""
            return self._cache_service.get_stats()

        def __getattr__(self, attr_name):
            # Anything not explicitly wrapped is forwarded to the
            # underlying cache service unchanged.
            return getattr(self._cache_service, attr_name)

    def get_cache_manager():
        """Backward compatibility: Get unified cache service"""
        return BackwardCompatibleCacheManager()

    def get_performance_monitor():
        """Backward compatibility: Get unified analysis engine for performance monitoring"""
        return UnifiedAnalysisEngine()

except ImportError:
    # Core services unavailable: provide no-op accessors so importers of
    # this module keep working (callers must handle the None result).
    def get_cache_manager():
        """Fallback cache manager"""
        return None

    def get_performance_monitor():
        """Fallback performance monitor"""
        return None
87
+
88
# Public API of the MCP utils package.
__all__ = [
    # Error-handling classes and helpers re-exported from .error_handler
    "ErrorHandler",
    "MCPError",
    "FileAccessError",
    "ParsingError",
    "AnalysisError",
    "ValidationError",
    "ResourceError",
    "ErrorSeverity",
    "ErrorCategory",
    "handle_mcp_errors",
    "get_error_handler",
    # Legacy accessors kept for backward compatibility
    "get_cache_manager",
    "get_performance_monitor",
    # Package capability metadata
    "MCP_UTILS_CAPABILITIES",
]