tree-sitter-analyzer 0.9.1__py3-none-any.whl → 0.9.3__py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
Potentially problematic release.
This version of tree-sitter-analyzer might be problematic.
- tree_sitter_analyzer/__init__.py +132 -132
- tree_sitter_analyzer/__main__.py +11 -11
- tree_sitter_analyzer/api.py +533 -533
- tree_sitter_analyzer/cli/__init__.py +39 -39
- tree_sitter_analyzer/cli/__main__.py +12 -12
- tree_sitter_analyzer/cli/commands/__init__.py +26 -26
- tree_sitter_analyzer/cli/commands/advanced_command.py +88 -88
- tree_sitter_analyzer/cli/commands/base_command.py +181 -178
- tree_sitter_analyzer/cli/commands/structure_command.py +138 -138
- tree_sitter_analyzer/cli/commands/summary_command.py +101 -101
- tree_sitter_analyzer/cli_main.py +7 -3
- tree_sitter_analyzer/core/__init__.py +15 -15
- tree_sitter_analyzer/core/analysis_engine.py +91 -87
- tree_sitter_analyzer/core/cache_service.py +320 -320
- tree_sitter_analyzer/core/engine.py +566 -566
- tree_sitter_analyzer/core/parser.py +293 -293
- tree_sitter_analyzer/encoding_utils.py +459 -459
- tree_sitter_analyzer/file_handler.py +210 -210
- tree_sitter_analyzer/formatters/__init__.py +1 -1
- tree_sitter_analyzer/formatters/base_formatter.py +167 -167
- tree_sitter_analyzer/formatters/formatter_factory.py +78 -78
- tree_sitter_analyzer/formatters/java_formatter.py +18 -18
- tree_sitter_analyzer/formatters/python_formatter.py +19 -19
- tree_sitter_analyzer/interfaces/__init__.py +9 -9
- tree_sitter_analyzer/interfaces/cli.py +528 -528
- tree_sitter_analyzer/interfaces/cli_adapter.py +344 -343
- tree_sitter_analyzer/interfaces/mcp_adapter.py +206 -206
- tree_sitter_analyzer/language_detector.py +53 -53
- tree_sitter_analyzer/languages/__init__.py +10 -10
- tree_sitter_analyzer/languages/java_plugin.py +1 -1
- tree_sitter_analyzer/languages/javascript_plugin.py +446 -446
- tree_sitter_analyzer/languages/python_plugin.py +755 -755
- tree_sitter_analyzer/mcp/__init__.py +34 -45
- tree_sitter_analyzer/mcp/resources/__init__.py +44 -44
- tree_sitter_analyzer/mcp/resources/code_file_resource.py +209 -209
- tree_sitter_analyzer/mcp/server.py +623 -568
- tree_sitter_analyzer/mcp/tools/__init__.py +30 -30
- tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py +681 -673
- tree_sitter_analyzer/mcp/tools/analyze_scale_tool_cli_compatible.py +247 -247
- tree_sitter_analyzer/mcp/tools/base_tool.py +54 -54
- tree_sitter_analyzer/mcp/tools/read_partial_tool.py +310 -308
- tree_sitter_analyzer/mcp/tools/table_format_tool.py +386 -379
- tree_sitter_analyzer/mcp/tools/universal_analyze_tool.py +563 -559
- tree_sitter_analyzer/mcp/utils/__init__.py +107 -107
- tree_sitter_analyzer/models.py +10 -10
- tree_sitter_analyzer/output_manager.py +253 -253
- tree_sitter_analyzer/plugins/__init__.py +280 -280
- tree_sitter_analyzer/plugins/base.py +529 -529
- tree_sitter_analyzer/plugins/manager.py +379 -379
- tree_sitter_analyzer/project_detector.py +330 -317
- tree_sitter_analyzer/queries/__init__.py +26 -26
- tree_sitter_analyzer/queries/java.py +391 -391
- tree_sitter_analyzer/queries/javascript.py +148 -148
- tree_sitter_analyzer/queries/python.py +285 -285
- tree_sitter_analyzer/queries/typescript.py +229 -229
- tree_sitter_analyzer/query_loader.py +257 -257
- tree_sitter_analyzer/security/boundary_manager.py +57 -51
- tree_sitter_analyzer/security/validator.py +246 -241
- tree_sitter_analyzer/utils.py +294 -277
- {tree_sitter_analyzer-0.9.1.dist-info → tree_sitter_analyzer-0.9.3.dist-info}/METADATA +13 -13
- tree_sitter_analyzer-0.9.3.dist-info/RECORD +77 -0
- {tree_sitter_analyzer-0.9.1.dist-info → tree_sitter_analyzer-0.9.3.dist-info}/entry_points.txt +1 -0
- tree_sitter_analyzer-0.9.1.dist-info/RECORD +0 -77
- {tree_sitter_analyzer-0.9.1.dist-info → tree_sitter_analyzer-0.9.3.dist-info}/WHEEL +0 -0
tree_sitter_analyzer/cli/commands/base_command.py
@@ -1,178 +1,181 @@
Entire file shown as removed and re-added; the 0.9.3 content follows.

#!/usr/bin/env python3
"""
Base Command Class

Abstract base class for all CLI commands implementing the Command Pattern.
"""

import asyncio
from abc import ABC, abstractmethod
from argparse import Namespace
from typing import Optional

from ...core.analysis_engine import AnalysisRequest, get_analysis_engine
from ...file_handler import read_file_partial
from ...language_detector import detect_language_from_file, is_language_supported
from ...models import AnalysisResult
from ...output_manager import output_error, output_info
from ...project_detector import detect_project_root
from ...security import SecurityValidator


class BaseCommand(ABC):
    """
    Base class for all CLI commands.

    Implements common functionality like file validation, language detection,
    and analysis engine interaction.
    """

    def __init__(self, args: Namespace):
        """Initialize command with parsed arguments."""
        self.args = args

        # Detect project root with priority handling
        file_path = getattr(args, "file_path", None)
        explicit_root = getattr(args, "project_root", None)
        self.project_root = detect_project_root(file_path, explicit_root)

        # Initialize components with project root
        self.analysis_engine = get_analysis_engine(self.project_root)
        self.security_validator = SecurityValidator(self.project_root)

    def validate_file(self) -> bool:
        """Validate input file exists and is accessible."""
        if not hasattr(self.args, "file_path") or not self.args.file_path:
            output_error("File path not specified.")
            return False

        # Security validation
        is_valid, error_msg = self.security_validator.validate_file_path(
            self.args.file_path
        )
        if not is_valid:
            output_error(f"Invalid file path: {error_msg}")
            return False

        import os

        if not os.path.exists(self.args.file_path):
            output_error("Invalid file path: file does not exist")
            return False

        return True

    def detect_language(self) -> str | None:
        """Detect or validate the target language."""
        if hasattr(self.args, "language") and self.args.language:
            # Sanitize language input
            sanitized_language = self.security_validator.sanitize_input(
                self.args.language, max_length=50
            )
            target_language = sanitized_language.lower()
            if (not hasattr(self.args, "table") or not self.args.table) and (
                not hasattr(self.args, "quiet") or not self.args.quiet
            ):
                output_info(f"INFO: Language explicitly specified: {target_language}")
        else:
            target_language = detect_language_from_file(self.args.file_path)
            if target_language == "unknown":
                output_error(
                    f"ERROR: Could not determine language for file '{self.args.file_path}'."
                )
                return None
            else:
                if (not hasattr(self.args, "table") or not self.args.table) and (
                    not hasattr(self.args, "quiet") or not self.args.quiet
                ):
                    # Language auto-detected - only show in verbose mode
                    pass

        # Language support validation
        if not is_language_supported(target_language):
            if target_language != "java":
                if (not hasattr(self.args, "table") or not self.args.table) and (
                    not hasattr(self.args, "quiet") or not self.args.quiet
                ):
                    output_info(
                        "INFO: Trying with Java analysis engine. May not work correctly."
                    )
                target_language = "java"  # Fallback

        return str(target_language) if target_language else None

    async def analyze_file(self, language: str) -> Optional["AnalysisResult"]:
        """Perform file analysis using the unified analysis engine."""
        try:
            # Handle partial read if enabled
            if hasattr(self.args, "partial_read") and self.args.partial_read:
                try:
                    partial_content = read_file_partial(
                        self.args.file_path,
                        start_line=self.args.start_line,
                        end_line=getattr(self.args, "end_line", None),
                        start_column=getattr(self.args, "start_column", None),
                        end_column=getattr(self.args, "end_column", None),
                    )
                    if partial_content is None:
                        output_error("Failed to read file partially")
                        return None
                except Exception as e:
                    output_error(f"Failed to read file partially: {e}")
                    return None

            request = AnalysisRequest(
                file_path=self.args.file_path,
                language=language,
                include_complexity=True,
                include_details=True,
            )
            analysis_result = await self.analysis_engine.analyze(request)

            if not analysis_result or not analysis_result.success:
                error_msg = (
                    analysis_result.error_message
                    if analysis_result
                    else "Unknown error"
                )
                output_error(f"Analysis failed: {error_msg}")
                return None

            return analysis_result

        except Exception as e:
            output_error(f"An error occurred during analysis: {e}")
            return None

    def execute(self) -> int:
        """
        Execute the command.

        Returns:
            int: Exit code (0 for success, 1 for failure)
        """
        # Validate inputs
        if not self.validate_file():
            return 1

        # Detect language
        language = self.detect_language()
        if not language:
            return 1

        # Execute the specific command
        try:
            return asyncio.run(self.execute_async(language))
        except Exception as e:
            output_error(f"An error occurred during command execution: {e}")
            return 1

    @abstractmethod
    async def execute_async(self, language: str) -> int:
        """
        Execute the command asynchronously.

        Args:
            language: Detected/specified target language

        Returns:
            int: Exit code (0 for success, 1 for failure)
        """
        pass
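For orientation: BaseCommand above defines the Command Pattern contract for the CLI. The synchronous execute() validates the file, resolves the language, and then drives the abstract execute_async() coroutine via asyncio.run(). Below is a minimal sketch of a hypothetical concrete command built on that contract; the class name and its one-line output are illustrative and not part of the package (StructureCommand in the next hunk is the package's own concrete command).

#!/usr/bin/env python3
"""Hypothetical example command (illustrative only, not in the package)."""

from ...output_manager import output_data
from .base_command import BaseCommand


class ElementCountCommand(BaseCommand):
    """Hypothetical command: print how many code elements a file contains."""

    async def execute_async(self, language: str) -> int:
        # BaseCommand.execute() has already validated the file path and
        # resolved the language before asyncio.run() enters this coroutine.
        analysis_result = await self.analyze_file(language)
        if not analysis_result:
            return 1  # analyze_file() already reported the error

        output_data(
            f"{analysis_result.file_path}: "
            f"{len(analysis_result.elements)} elements ({language})"
        )
        return 0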
tree_sitter_analyzer/cli/commands/structure_command.py
@@ -1,138 +1,138 @@
Entire file shown as removed and re-added; the removed and added text is identical, so the 0.9.3 content appears once below.

#!/usr/bin/env python3
"""
Structure Command

Handles structure analysis functionality with appropriate Japanese output.
"""

from typing import TYPE_CHECKING

from ...output_manager import output_data, output_json, output_section
from .base_command import BaseCommand

if TYPE_CHECKING:
    from ...models import AnalysisResult


class StructureCommand(BaseCommand):
    """Command for structure analysis with Japanese output."""

    async def execute_async(self, language: str) -> int:
        analysis_result = await self.analyze_file(language)
        if not analysis_result:
            return 1

        self._output_structure_analysis(analysis_result)
        return 0

    def _output_structure_analysis(self, analysis_result: "AnalysisResult") -> None:
        """Output structure analysis results with appropriate Japanese header."""
        output_section("Structure Analysis Results")

        # Convert to legacy structure format expected by tests
        structure_dict = self._convert_to_legacy_format(analysis_result)

        if self.args.output_format == "json":
            output_json(structure_dict)
        else:
            self._output_text_format(structure_dict)

    def _convert_to_legacy_format(self, analysis_result: "AnalysisResult") -> dict:
        """Convert AnalysisResult to legacy structure format expected by tests."""
        import time

        # Extract elements by type
        classes = [
            e for e in analysis_result.elements if e.__class__.__name__ == "Class"
        ]
        methods = [
            e for e in analysis_result.elements if e.__class__.__name__ == "Function"
        ]
        fields = [
            e for e in analysis_result.elements if e.__class__.__name__ == "Variable"
        ]
        imports = [
            e for e in analysis_result.elements if e.__class__.__name__ == "Import"
        ]
        packages = [
            e for e in analysis_result.elements if e.__class__.__name__ == "Package"
        ]

        return {
            "file_path": analysis_result.file_path,
            "language": analysis_result.language,
            "package": (
                {
                    "name": packages[0].name,
                    "line_range": {
                        "start": packages[0].start_line,
                        "end": packages[0].end_line,
                    },
                }
                if packages
                else None
            ),
            "classes": [{"name": getattr(c, "name", "unknown")} for c in classes],
            "methods": [{"name": getattr(m, "name", "unknown")} for m in methods],
            "fields": [{"name": getattr(f, "name", "unknown")} for f in fields],
            "imports": [
                {
                    "name": getattr(i, "name", "unknown"),
                    "is_static": getattr(i, "is_static", False),
                    "is_wildcard": getattr(i, "is_wildcard", False),
                    "statement": getattr(i, "import_statement", ""),
                    "line_range": {
                        "start": getattr(i, "start_line", 0),
                        "end": getattr(i, "end_line", 0),
                    },
                }
                for i in imports
            ],
            "annotations": [],
            "statistics": {
                "class_count": len(classes),
                "method_count": len(methods),
                "field_count": len(fields),
                "import_count": len(imports),
                "total_lines": analysis_result.line_count,
                "annotation_count": 0,
            },
            "analysis_metadata": {
                "analysis_time": getattr(analysis_result, "analysis_time", 0.0),
                "language": analysis_result.language,
                "file_path": analysis_result.file_path,
                "analyzer_version": "2.0.0",
                "timestamp": time.time(),
            },
        }

    def _output_text_format(self, structure_dict: dict) -> None:
        """Output structure analysis in human-readable text format."""
        output_data(f"File: {structure_dict['file_path']}")
        output_data(f"Language: {structure_dict['language']}")

        if structure_dict["package"]:
            output_data(f"Package: {structure_dict['package']['name']}")

        stats = structure_dict["statistics"]
        output_data("Statistics:")
        output_data(f" Classes: {stats['class_count']}")
        output_data(f" Methods: {stats['method_count']}")
        output_data(f" Fields: {stats['field_count']}")
        output_data(f" Imports: {stats['import_count']}")
        output_data(f" Total lines: {stats['total_lines']}")

        if structure_dict["classes"]:
            output_data("Classes:")
            for cls in structure_dict["classes"]:
                output_data(f" - {cls['name']}")

        if structure_dict["methods"]:
            output_data("Methods:")
            for method in structure_dict["methods"]:
                output_data(f" - {method['name']}")

        if structure_dict["fields"]:
            output_data("Fields:")
            for field in structure_dict["fields"]:
                output_data(f" - {field['name']}")