tree-sitter-analyzer 0.8.3__py3-none-any.whl → 0.9.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tree-sitter-analyzer might be problematic; see the registry's advisory for details.
- tree_sitter_analyzer/__init__.py +132 -132
- tree_sitter_analyzer/__main__.py +11 -11
- tree_sitter_analyzer/api.py +533 -533
- tree_sitter_analyzer/cli/__init__.py +39 -39
- tree_sitter_analyzer/cli/__main__.py +12 -12
- tree_sitter_analyzer/cli/commands/__init__.py +26 -26
- tree_sitter_analyzer/cli/commands/advanced_command.py +88 -88
- tree_sitter_analyzer/cli/commands/base_command.py +182 -180
- tree_sitter_analyzer/cli/commands/structure_command.py +138 -138
- tree_sitter_analyzer/cli/commands/summary_command.py +101 -101
- tree_sitter_analyzer/core/__init__.py +15 -15
- tree_sitter_analyzer/core/analysis_engine.py +74 -78
- tree_sitter_analyzer/core/cache_service.py +320 -320
- tree_sitter_analyzer/core/engine.py +566 -566
- tree_sitter_analyzer/core/parser.py +293 -293
- tree_sitter_analyzer/encoding_utils.py +459 -459
- tree_sitter_analyzer/file_handler.py +210 -210
- tree_sitter_analyzer/formatters/__init__.py +1 -1
- tree_sitter_analyzer/formatters/base_formatter.py +167 -167
- tree_sitter_analyzer/formatters/formatter_factory.py +78 -78
- tree_sitter_analyzer/formatters/java_formatter.py +18 -18
- tree_sitter_analyzer/formatters/python_formatter.py +19 -19
- tree_sitter_analyzer/interfaces/__init__.py +9 -9
- tree_sitter_analyzer/interfaces/cli.py +528 -528
- tree_sitter_analyzer/interfaces/cli_adapter.py +344 -343
- tree_sitter_analyzer/interfaces/mcp_adapter.py +206 -206
- tree_sitter_analyzer/language_detector.py +53 -53
- tree_sitter_analyzer/languages/__init__.py +10 -10
- tree_sitter_analyzer/languages/java_plugin.py +1 -1
- tree_sitter_analyzer/languages/javascript_plugin.py +446 -446
- tree_sitter_analyzer/languages/python_plugin.py +755 -755
- tree_sitter_analyzer/mcp/__init__.py +34 -31
- tree_sitter_analyzer/mcp/resources/__init__.py +44 -44
- tree_sitter_analyzer/mcp/resources/code_file_resource.py +209 -209
- tree_sitter_analyzer/mcp/server.py +623 -436
- tree_sitter_analyzer/mcp/tools/__init__.py +30 -30
- tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py +10 -6
- tree_sitter_analyzer/mcp/tools/analyze_scale_tool_cli_compatible.py +247 -242
- tree_sitter_analyzer/mcp/tools/base_tool.py +54 -54
- tree_sitter_analyzer/mcp/tools/read_partial_tool.py +310 -308
- tree_sitter_analyzer/mcp/tools/table_format_tool.py +386 -379
- tree_sitter_analyzer/mcp/tools/universal_analyze_tool.py +563 -559
- tree_sitter_analyzer/mcp/utils/__init__.py +107 -107
- tree_sitter_analyzer/models.py +10 -10
- tree_sitter_analyzer/output_manager.py +253 -253
- tree_sitter_analyzer/plugins/__init__.py +280 -280
- tree_sitter_analyzer/plugins/base.py +529 -529
- tree_sitter_analyzer/plugins/manager.py +379 -379
- tree_sitter_analyzer/queries/__init__.py +26 -26
- tree_sitter_analyzer/queries/java.py +391 -391
- tree_sitter_analyzer/queries/javascript.py +148 -148
- tree_sitter_analyzer/queries/python.py +285 -285
- tree_sitter_analyzer/queries/typescript.py +229 -229
- tree_sitter_analyzer/query_loader.py +257 -257
- tree_sitter_analyzer/security/boundary_manager.py +237 -279
- tree_sitter_analyzer/security/validator.py +60 -58
- tree_sitter_analyzer/utils.py +294 -277
- {tree_sitter_analyzer-0.8.3.dist-info → tree_sitter_analyzer-0.9.2.dist-info}/METADATA +28 -19
- tree_sitter_analyzer-0.9.2.dist-info/RECORD +77 -0
- {tree_sitter_analyzer-0.8.3.dist-info → tree_sitter_analyzer-0.9.2.dist-info}/entry_points.txt +1 -0
- tree_sitter_analyzer-0.8.3.dist-info/RECORD +0 -77
- {tree_sitter_analyzer-0.8.3.dist-info → tree_sitter_analyzer-0.9.2.dist-info}/WHEEL +0 -0
@@ -1,101 +1,101 @@
-#!/usr/bin/env python3
-"""
-Summary Command
-
-Handles summary functionality with specified element types.
-"""
-
-from typing import TYPE_CHECKING, Any
-
-from ...output_manager import output_data, output_json, output_section
-from .base_command import BaseCommand
-
-if TYPE_CHECKING:
-    from ...models import AnalysisResult
-
-
-class SummaryCommand(BaseCommand):
-    """Command for summary analysis with specified element types."""
-
-    async def execute_async(self, language: str) -> int:
-        analysis_result = await self.analyze_file(language)
-        if not analysis_result:
-            return 1
-
-        self._output_summary_analysis(analysis_result)
-        return 0
-
-    def _output_summary_analysis(self, analysis_result: "AnalysisResult") -> None:
-        """Output summary analysis results."""
-        output_section("Summary Results")
-
-        # Get summary types from args (default: classes,methods)
-        summary_types = getattr(self.args, "summary", "classes,methods")
-        if summary_types:
-            requested_types = [t.strip() for t in summary_types.split(",")]
-        else:
-            requested_types = ["classes", "methods"]
-
-        # Extract elements by type
-        classes = [
-            e for e in analysis_result.elements if e.__class__.__name__ == "Class"
-        ]
-        methods = [
-            e for e in analysis_result.elements if e.__class__.__name__ == "Function"
-        ]
-        fields = [
-            e for e in analysis_result.elements if e.__class__.__name__ == "Variable"
-        ]
-        imports = [
-            e for e in analysis_result.elements if e.__class__.__name__ == "Import"
-        ]
-
-        summary_data: dict[str, Any] = {
-            "file_path": analysis_result.file_path,
-            "language": analysis_result.language,
-            "summary": {},
-        }
-
-        if "classes" in requested_types:
-            summary_data["summary"]["classes"] = [
-                {"name": getattr(c, "name", "unknown")} for c in classes
-            ]
-
-        if "methods" in requested_types:
-            summary_data["summary"]["methods"] = [
-                {"name": getattr(m, "name", "unknown")} for m in methods
-            ]
-
-        if "fields" in requested_types:
-            summary_data["summary"]["fields"] = [
-                {"name": getattr(f, "name", "unknown")} for f in fields
-            ]
-
-        if "imports" in requested_types:
-            summary_data["summary"]["imports"] = [
-                {"name": getattr(i, "name", "unknown")} for i in imports
-            ]
-
-        if self.args.output_format == "json":
-            output_json(summary_data)
-        else:
-            self._output_text_format(summary_data, requested_types)
-
-    def _output_text_format(self, summary_data: dict, requested_types: list) -> None:
-        """Output summary in human-readable text format."""
-        output_data(f"File: {summary_data['file_path']}")
-        output_data(f"Language: {summary_data['language']}")
-
-        for element_type in requested_types:
-            if element_type in summary_data["summary"]:
-                elements = summary_data["summary"][element_type]
-                type_name_map = {
-                    "classes": "Classes",
-                    "methods": "Methods",
-                    "fields": "Fields",
-                    "imports": "Imports",
-                }
-                type_name = type_name_map.get(element_type, element_type)
-                output_data(f"\n{type_name} ({len(elements)} items):")
-                for element in elements:
-                    output_data(f" - {element['name']}")
+#!/usr/bin/env python3
+"""
+Summary Command
+
+Handles summary functionality with specified element types.
+"""
+
+from typing import TYPE_CHECKING, Any
+
+from ...output_manager import output_data, output_json, output_section
+from .base_command import BaseCommand
+
+if TYPE_CHECKING:
+    from ...models import AnalysisResult
+
+
+class SummaryCommand(BaseCommand):
+    """Command for summary analysis with specified element types."""
+
+    async def execute_async(self, language: str) -> int:
+        analysis_result = await self.analyze_file(language)
+        if not analysis_result:
+            return 1
+
+        self._output_summary_analysis(analysis_result)
+        return 0
+
+    def _output_summary_analysis(self, analysis_result: "AnalysisResult") -> None:
+        """Output summary analysis results."""
+        output_section("Summary Results")
+
+        # Get summary types from args (default: classes,methods)
+        summary_types = getattr(self.args, "summary", "classes,methods")
+        if summary_types:
+            requested_types = [t.strip() for t in summary_types.split(",")]
+        else:
+            requested_types = ["classes", "methods"]
+
+        # Extract elements by type
+        classes = [
+            e for e in analysis_result.elements if e.__class__.__name__ == "Class"
+        ]
+        methods = [
+            e for e in analysis_result.elements if e.__class__.__name__ == "Function"
+        ]
+        fields = [
+            e for e in analysis_result.elements if e.__class__.__name__ == "Variable"
+        ]
+        imports = [
+            e for e in analysis_result.elements if e.__class__.__name__ == "Import"
+        ]
+
+        summary_data: dict[str, Any] = {
+            "file_path": analysis_result.file_path,
+            "language": analysis_result.language,
+            "summary": {},
+        }
+
+        if "classes" in requested_types:
+            summary_data["summary"]["classes"] = [
+                {"name": getattr(c, "name", "unknown")} for c in classes
+            ]
+
+        if "methods" in requested_types:
+            summary_data["summary"]["methods"] = [
+                {"name": getattr(m, "name", "unknown")} for m in methods
+            ]
+
+        if "fields" in requested_types:
+            summary_data["summary"]["fields"] = [
+                {"name": getattr(f, "name", "unknown")} for f in fields
+            ]
+
+        if "imports" in requested_types:
+            summary_data["summary"]["imports"] = [
+                {"name": getattr(i, "name", "unknown")} for i in imports
+            ]
+
+        if self.args.output_format == "json":
+            output_json(summary_data)
+        else:
+            self._output_text_format(summary_data, requested_types)
+
+    def _output_text_format(self, summary_data: dict, requested_types: list) -> None:
+        """Output summary in human-readable text format."""
+        output_data(f"File: {summary_data['file_path']}")
+        output_data(f"Language: {summary_data['language']}")
+
+        for element_type in requested_types:
+            if element_type in summary_data["summary"]:
+                elements = summary_data["summary"][element_type]
+                type_name_map = {
+                    "classes": "Classes",
+                    "methods": "Methods",
+                    "fields": "Fields",
+                    "imports": "Imports",
+                }
+                type_name = type_name_map.get(element_type, element_type)
+                output_data(f"\n{type_name} ({len(elements)} items):")
+                for element in elements:
+                    output_data(f" - {element['name']}")
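For orientation, the shaping that _output_summary_analysis performs can be reproduced in isolation. The following sketch is illustrative only: the filtering on __class__.__name__ and the summary_data layout come from the hunk above, while the stand-in Class and Function dataclasses and the sample values are hypothetical.

# Hypothetical stand-ins for the analyzer's element types; only the class
# names matter here, because SummaryCommand filters on __class__.__name__.
from dataclasses import dataclass


@dataclass
class Class:
    name: str


@dataclass
class Function:
    name: str


elements = [Class("UserService"), Function("save"), Function("load")]
requested_types = [t.strip() for t in "classes,methods".split(",")]

summary_data = {"file_path": "UserService.java", "language": "java", "summary": {}}
if "classes" in requested_types:
    summary_data["summary"]["classes"] = [
        {"name": e.name} for e in elements if e.__class__.__name__ == "Class"
    ]
if "methods" in requested_types:
    summary_data["summary"]["methods"] = [
        {"name": e.name} for e in elements if e.__class__.__name__ == "Function"
    ]

print(summary_data)
# {'file_path': 'UserService.java', 'language': 'java',
#  'summary': {'classes': [{'name': 'UserService'}],
#              'methods': [{'name': 'save'}, {'name': 'load'}]}}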
@@ -1,15 +1,15 @@
-#!/usr/bin/env python3
-"""
-Core module for tree_sitter_analyzer.
-
-This module contains the core components of the new architecture:
-- AnalysisEngine: Main analysis orchestrator
-- Parser: Tree-sitter parsing wrapper
-- QueryExecutor: Query execution engine
-"""
-
-from .engine import AnalysisEngine
-from .parser import Parser, ParseResult
-from .query import QueryExecutor
-
-__all__ = ["AnalysisEngine", "Parser", "ParseResult", "QueryExecutor"]
+#!/usr/bin/env python3
+"""
+Core module for tree_sitter_analyzer.
+
+This module contains the core components of the new architecture:
+- AnalysisEngine: Main analysis orchestrator
+- Parser: Tree-sitter parsing wrapper
+- QueryExecutor: Query execution engine
+"""
+
+from .engine import AnalysisEngine
+from .parser import Parser, ParseResult
+from .query import QueryExecutor
+
+__all__ = ["AnalysisEngine", "Parser", "ParseResult", "QueryExecutor"]
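Assuming the wheel is installed, the names re-exported by this __init__ can be imported directly from the core package; a minimal sketch:

# Sketch: import the four components listed in __all__ above.
from tree_sitter_analyzer.core import AnalysisEngine, Parser, ParseResult, QueryExecutor

print([obj.__name__ for obj in (AnalysisEngine, Parser, ParseResult, QueryExecutor)])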
@@ -115,7 +115,7 @@ class PerformanceMonitor:
         self._total_operations += 1

     def clear_metrics(self) -> None:
-        """
+        """Clear collected metrics"""
         self._operation_stats.clear()
         self._total_operations = 0
         self._last_duration = 0.0
@@ -123,7 +123,7 @@ class PerformanceMonitor:


 class PerformanceContext:
-    """
+    """Performance measurement context"""

     def __init__(self, operation_name: str, monitor: PerformanceMonitor) -> None:
         self.operation_name = operation_name
@@ -148,14 +148,14 @@ class PerformanceContext:
 @dataclass(frozen=True)
 class AnalysisRequest:
     """
-
+    Analysis request

     Attributes:
-        file_path:
-        language:
-        include_complexity:
-        include_details:
-        format_type:
+        file_path: Path to target file to analyze
+        language: Programming language (auto-detected if None)
+        include_complexity: Whether to include complexity metrics
+        include_details: Whether to include detailed structure info
+        format_type: Output format
     """

     file_path: str
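A hedged sketch of constructing a request with the attributes documented above. Only the field names come from the docstring; the import path (core/analysis_engine.py, inferred from the changed-file list) and the example values are assumptions.

from tree_sitter_analyzer.core.analysis_engine import AnalysisRequest  # module path assumed

request = AnalysisRequest(
    file_path="example.py",
    language="python",        # auto-detected when None, per the docstring
    include_complexity=True,  # include complexity metrics
    include_details=False,    # skip detailed structure info
    format_type="json",       # output format; accepted values are not shown in this diff
)
print(request.file_path)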
@@ -167,13 +167,13 @@ class AnalysisRequest:
     @classmethod
     def from_mcp_arguments(cls, arguments: dict[str, Any]) -> "AnalysisRequest":
         """
-        MCP
+        Create analysis request from MCP tool arguments

         Args:
-            arguments: MCP
+            arguments: MCP argument dictionary

         Returns:
-
+            AnalysisRequest
         """
         return cls(
             file_path=arguments.get("file_path", ""),
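The same request can also be built from an MCP-style argument dictionary via the classmethod above; a small sketch under the same import-path assumption:

from tree_sitter_analyzer.core.analysis_engine import AnalysisRequest  # module path assumed

arguments = {"file_path": "example.py", "language": "python"}  # MCP tool arguments
request = AnalysisRequest.from_mcp_arguments(arguments)
print(request.file_path)  # "example.py"; missing keys fall back to defaults via .get()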
@@ -189,27 +189,26 @@

 class UnifiedAnalysisEngine:
     """
-
+    Unified analysis engine (revised)

-    CLI
-
-    Achieves efficient resource usage and cache sharing.
+    Central engine shared by CLI, MCP and other interfaces, implemented as a
+    singleton to enable efficient resource usage and cache sharing.

-
-    -
-    -
+    Improvements:
+    - Fix async issues in destructor
+    - Provide explicit cleanup() method

     Attributes:
-        _cache_service:
-        _plugin_manager:
-        _performance_monitor:
+        _cache_service: Cache service
+        _plugin_manager: Plugin manager
+        _performance_monitor: Performance monitor
     """

     _instances: Dict[str, "UnifiedAnalysisEngine"] = {}
     _lock: threading.Lock = threading.Lock()

     def __new__(cls, project_root: str = None) -> "UnifiedAnalysisEngine":
-        """
+        """Singleton instance sharing (project_root aware)"""
         # Create a key based on project_root for different instances
         instance_key = project_root or "default"

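A quick sketch of the singleton behaviour implied by __new__ above: instances are shared per project_root key, with "default" used when none is given. The import path is assumed, and constructing an engine also triggers plugin loading and security-validator setup.

from tree_sitter_analyzer.core.analysis_engine import UnifiedAnalysisEngine  # module path assumed

a = UnifiedAnalysisEngine(project_root="/tmp/project")
b = UnifiedAnalysisEngine(project_root="/tmp/project")
c = UnifiedAnalysisEngine()  # keyed as "default"

print(a is b)  # True: the same project_root shares one instance
print(a is c)  # False: a different instance key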
@@ -224,7 +223,7 @@
         return cls._instances[instance_key]

     def __init__(self, project_root: str = None) -> None:
-        """
+        """Initialize (executed only once per instance)"""
         if hasattr(self, "_initialized") and self._initialized:
             return

@@ -234,7 +233,7 @@
         self._security_validator = SecurityValidator(project_root)
         self._project_root = project_root

-        #
+        # Auto-load plugins
         self._load_plugins()

         self._initialized = True
@@ -242,7 +241,7 @@
         log_info(f"UnifiedAnalysisEngine initialized with project root: {project_root}")

     def _load_plugins(self) -> None:
-        """
+        """Auto-load available plugins"""
         log_info("Loading plugins using PluginManager...")

         try:
@@ -261,17 +260,17 @@

     async def analyze(self, request: AnalysisRequest) -> AnalysisResult:
         """
-
+        Unified analysis method

         Args:
-            request:
+            request: Analysis request

         Returns:
-
+            Analysis result

         Raises:
-            UnsupportedLanguageError:
-            FileNotFoundError:
+            UnsupportedLanguageError: When language is not supported
+            FileNotFoundError: When file is not found
         """
         log_info(f"Starting analysis for {request.file_path}")

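Since analyze() is a coroutine, callers drive it with asyncio. A hedged end-to-end sketch; the file path is a placeholder and the result attributes used (language and elements) are those visible later in this diff:

import asyncio

from tree_sitter_analyzer.core.analysis_engine import (  # module path assumed
    AnalysisRequest,
    UnifiedAnalysisEngine,
)


async def main() -> None:
    engine = UnifiedAnalysisEngine(project_root="/tmp/project")
    request = AnalysisRequest.from_mcp_arguments({"file_path": "/tmp/project/example.py"})
    # May raise UnsupportedLanguageError or FileNotFoundError, per the docstring above.
    result = await engine.analyze(request)
    print(result.language, len(result.elements or []))


asyncio.run(main())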
@@ -281,23 +280,23 @@
             log_error(f"Security validation failed for file path: {request.file_path} - {error_msg}")
             raise ValueError(f"Invalid file path: {error_msg}")

-        #
+        # Cache check (shared across CLI/MCP)
         cache_key = self._generate_cache_key(request)
         cached_result = await self._cache_service.get(cache_key)
         if cached_result:
             log_info(f"Cache hit for {request.file_path}")
             return cached_result  # type: ignore

-        #
+        # Language detection
         language = request.language or self._detect_language(request.file_path)
         log_debug(f"Detected language: {language}")

-        #
+        # Debug: inspect registered plugins
         supported_languages = self._plugin_manager.get_supported_languages()
         log_debug(f"Supported languages: {supported_languages}")
         log_debug(f"Looking for plugin for language: {language}")

-        #
+        # Get plugin
         plugin = self._plugin_manager.get_plugin(language)
         if not plugin:
             error_msg = f"Language {language} not supported"
@@ -306,7 +305,7 @@

         log_debug(f"Found plugin for {language}: {type(plugin)}")

-        #
+        # Run analysis (with performance monitoring)
         with self._performance_monitor.measure_operation(f"analyze_{language}"):
             log_debug(f"Calling plugin.analyze_file for {request.file_path}")
             result = await plugin.analyze_file(request.file_path, request)
@@ -314,11 +313,11 @@
                 f"Plugin returned result: success={result.success}, elements={len(result.elements) if result.elements else 0}"
             )

-            #
+            # Ensure language field is set
             if result.language == "unknown" or not result.language:
                 result.language = language

-            #
+            # Save to cache
             await self._cache_service.set(cache_key, result)

             log_performance(
@@ -355,13 +354,13 @@

     def _generate_cache_key(self, request: AnalysisRequest) -> str:
         """
-
+        Generate cache key

         Args:
-            request:
+            request: Analysis request

         Returns:
-
+            Hashed cache key
         """
         # Build the string used to generate a unique key
         key_components = [
@@ -379,13 +378,13 @@

     def _detect_language(self, file_path: str) -> str:
         """
-
+        Detect language from file extension

         Args:
-            file_path:
+            file_path: File path

         Returns:
-
+            Detected language name
         """
         # Simple extension-based detection
         import os
@@ -410,84 +409,83 @@
         return detected

     def clear_cache(self) -> None:
-        """
+        """Clear cache (for tests)"""
         self._cache_service.clear()
         log_info("Analysis engine cache cleared")

     def register_plugin(self, language: str, plugin: BaseLanguagePlugin) -> None:
         """
-
+        Register plugin

         Args:
-            language:
-            plugin:
+            language: Language name (kept for compatibility, not used)
+            plugin: Language plugin instance
         """
         self._plugin_manager.register_plugin(plugin)

     def get_supported_languages(self) -> list[str]:
         """
-
+        Get list of supported languages

         Returns:
-
+            List of language names
         """
         return self._plugin_manager.get_supported_languages()

     def get_cache_stats(self) -> dict[str, Any]:
         """
-
+        Get cache statistics

         Returns:
-
+            Cache statistics dictionary
         """
         return self._cache_service.get_stats()

     async def invalidate_cache_pattern(self, pattern: str) -> int:
         """
-
+        Invalidate cached entries matching a pattern

         Args:
-            pattern:
+            pattern: Pattern to match keys

         Returns:
-
+            Number of invalidated keys
         """
         return await self._cache_service.invalidate_pattern(pattern)

     def measure_operation(self, operation_name: str) -> "PerformanceContext":
         """
-
+        Context manager for performance measurement

         Args:
-            operation_name:
+            operation_name: Operation name

         Returns:
-
+            PerformanceContext
         """
         return self._performance_monitor.measure_operation(operation_name)

     def start_monitoring(self) -> None:
-        """
+        """Start performance monitoring"""
         self._performance_monitor.start_monitoring()

     def stop_monitoring(self) -> None:
-        """
+        """Stop performance monitoring"""
         self._performance_monitor.stop_monitoring()

     def get_operation_stats(self) -> dict[str, Any]:
-        """
+        """Get operation statistics"""
         return self._performance_monitor.get_operation_stats()

     def get_performance_summary(self) -> dict[str, Any]:
-        """
+        """Get performance summary"""
         return self._performance_monitor.get_performance_summary()

     def clear_metrics(self) -> None:
         """
-
+        Clear collected performance metrics

-
-        Used during testing and debugging.
+        Resets metrics collected by performance monitoring. Used in tests/debugging.
         """
         # Reset by creating a new performance monitor instance
         self._performance_monitor = PerformanceMonitor()
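The public helpers documented in this hunk can be exercised together. A hedged sketch: the method names come from the diff, while the import path and the workload are assumptions.

from tree_sitter_analyzer.core.analysis_engine import UnifiedAnalysisEngine  # module path assumed

engine = UnifiedAnalysisEngine(project_root="/tmp/project")

print(engine.get_supported_languages())  # list of language names
print(engine.get_cache_stats())          # cache statistics dictionary

engine.start_monitoring()
with engine.measure_operation("custom_step"):  # PerformanceContext used as a context manager
    pass  # the work being measured
engine.stop_monitoring()

print(engine.get_operation_stats())
print(engine.get_performance_summary())
engine.clear_metrics()
engine.clear_cache()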
@@ -495,10 +493,10 @@

     def cleanup(self) -> None:
         """
-
+        Explicit resource cleanup

-
-
+        Call explicitly (e.g., at end of tests) to clean up resources and avoid
+        async issues in destructors.
         """
         try:
             if hasattr(self, "_cache_service"):
@@ -511,11 +509,9 @@

     def __del__(self) -> None:
         """
-
+        Destructor - keep minimal to avoid issues in async contexts

-
-        This is to avoid problems that occur during garbage collection.
-        Use the cleanup() method for explicit cleanup.
+        Performs no cleanup; use cleanup() explicitly when needed.
         """
         # Do nothing in the destructor (to avoid issues in async contexts)
         pass
@@ -523,27 +519,27 @@

 # Simple plugin implementation (for testing)
 class MockLanguagePlugin:
-    """
+    """Mock plugin for testing"""

     def __init__(self, language: str) -> None:
         self.language = language

     def get_language_name(self) -> str:
-        """
+        """Get language name"""
         return self.language

     def get_file_extensions(self) -> list[str]:
-        """
+        """Get supported file extensions"""
         return [f".{self.language}"]

     def create_extractor(self) -> None:
-        """
+        """Create extractor (mock)"""
         return None

     async def analyze_file(
         self, file_path: str, request: AnalysisRequest
     ) -> AnalysisResult:
-        """
+        """Mock analysis implementation"""
         log_info(f"Mock analysis for {file_path} ({self.language})")

         # Return a simple analysis result
@@ -569,12 +565,12 @@ class MockLanguagePlugin:

 def get_analysis_engine(project_root: str = None) -> UnifiedAnalysisEngine:
     """
-
+    Get unified analysis engine instance

     Args:
         project_root: Project root directory for security validation

     Returns:
-
+        Singleton instance of UnifiedAnalysisEngine
     """
     return UnifiedAnalysisEngine(project_root)
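Finally, a hedged sketch of the module-level accessor above, which returns the shared singleton; cleanup() is the explicit teardown the class docstring recommends instead of relying on __del__ (import path assumed):

from tree_sitter_analyzer.core.analysis_engine import (  # module path assumed
    UnifiedAnalysisEngine,
    get_analysis_engine,
)

engine = get_analysis_engine(project_root="/tmp/project")
assert engine is UnifiedAnalysisEngine(project_root="/tmp/project")  # same singleton instance

engine.cleanup()  # explicit resource cleanup, e.g. at the end of a test run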