tree-sitter-analyzer: 1.9.2-py3-none-any.whl → 1.9.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tree-sitter-analyzer might be problematic.
- tree_sitter_analyzer/__init__.py +1 -1
- tree_sitter_analyzer/api.py +216 -8
- tree_sitter_analyzer/cli/argument_validator.py +1 -1
- tree_sitter_analyzer/cli/commands/advanced_command.py +3 -6
- tree_sitter_analyzer/cli/commands/query_command.py +3 -1
- tree_sitter_analyzer/cli/commands/table_command.py +3 -3
- tree_sitter_analyzer/constants.py +5 -3
- tree_sitter_analyzer/core/analysis_engine.py +1 -1
- tree_sitter_analyzer/core/cache_service.py +1 -1
- tree_sitter_analyzer/core/engine.py +34 -10
- tree_sitter_analyzer/core/query.py +82 -2
- tree_sitter_analyzer/encoding_utils.py +64 -0
- tree_sitter_analyzer/exceptions.py +1 -1
- tree_sitter_analyzer/file_handler.py +49 -33
- tree_sitter_analyzer/formatters/base_formatter.py +1 -1
- tree_sitter_analyzer/formatters/html_formatter.py +24 -14
- tree_sitter_analyzer/formatters/javascript_formatter.py +28 -21
- tree_sitter_analyzer/formatters/language_formatter_factory.py +7 -4
- tree_sitter_analyzer/formatters/markdown_formatter.py +4 -4
- tree_sitter_analyzer/formatters/python_formatter.py +4 -4
- tree_sitter_analyzer/formatters/typescript_formatter.py +1 -1
- tree_sitter_analyzer/interfaces/mcp_adapter.py +4 -2
- tree_sitter_analyzer/interfaces/mcp_server.py +10 -10
- tree_sitter_analyzer/language_detector.py +30 -5
- tree_sitter_analyzer/language_loader.py +46 -26
- tree_sitter_analyzer/languages/css_plugin.py +6 -6
- tree_sitter_analyzer/languages/html_plugin.py +12 -8
- tree_sitter_analyzer/languages/java_plugin.py +330 -520
- tree_sitter_analyzer/languages/javascript_plugin.py +22 -78
- tree_sitter_analyzer/languages/markdown_plugin.py +277 -297
- tree_sitter_analyzer/languages/python_plugin.py +47 -85
- tree_sitter_analyzer/languages/typescript_plugin.py +48 -123
- tree_sitter_analyzer/mcp/resources/project_stats_resource.py +14 -8
- tree_sitter_analyzer/mcp/server.py +38 -23
- tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py +10 -7
- tree_sitter_analyzer/mcp/tools/analyze_scale_tool_cli_compatible.py +51 -7
- tree_sitter_analyzer/mcp/tools/fd_rg_utils.py +11 -7
- tree_sitter_analyzer/mcp/tools/find_and_grep_tool.py +8 -6
- tree_sitter_analyzer/mcp/tools/list_files_tool.py +6 -6
- tree_sitter_analyzer/mcp/tools/output_format_validator.py +148 -0
- tree_sitter_analyzer/mcp/tools/search_content_tool.py +48 -15
- tree_sitter_analyzer/mcp/tools/table_format_tool.py +13 -8
- tree_sitter_analyzer/mcp/utils/file_output_manager.py +8 -3
- tree_sitter_analyzer/mcp/utils/gitignore_detector.py +24 -12
- tree_sitter_analyzer/mcp/utils/path_resolver.py +2 -2
- tree_sitter_analyzer/models.py +16 -0
- tree_sitter_analyzer/mypy_current_errors.txt +2 -0
- tree_sitter_analyzer/plugins/base.py +66 -0
- tree_sitter_analyzer/queries/java.py +9 -3
- tree_sitter_analyzer/queries/javascript.py +3 -8
- tree_sitter_analyzer/queries/markdown.py +1 -1
- tree_sitter_analyzer/queries/python.py +2 -2
- tree_sitter_analyzer/security/boundary_manager.py +2 -5
- tree_sitter_analyzer/security/regex_checker.py +2 -2
- tree_sitter_analyzer/security/validator.py +5 -1
- tree_sitter_analyzer/table_formatter.py +4 -4
- tree_sitter_analyzer/utils/__init__.py +27 -116
- tree_sitter_analyzer/{utils.py → utils/logging.py} +2 -2
- tree_sitter_analyzer/utils/tree_sitter_compat.py +2 -2
- {tree_sitter_analyzer-1.9.2.dist-info → tree_sitter_analyzer-1.9.4.dist-info}/METADATA +87 -45
- tree_sitter_analyzer-1.9.4.dist-info/RECORD +111 -0
- tree_sitter_analyzer-1.9.2.dist-info/RECORD +0 -109
- {tree_sitter_analyzer-1.9.2.dist-info → tree_sitter_analyzer-1.9.4.dist-info}/WHEEL +0 -0
- {tree_sitter_analyzer-1.9.2.dist-info → tree_sitter_analyzer-1.9.4.dist-info}/entry_points.txt +0 -0
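The listing above comes from comparing the two published wheels file by file. For reference, a similar comparison can be reproduced locally with the standard library alone; this is a minimal sketch, and the wheel filenames below are assumptions based on the listing rather than paths taken from it:

    # Minimal sketch: diff one module between two downloaded wheels.
    # Wheel filenames are assumptions; adjust to the files you actually downloaded.
    import difflib
    import zipfile

    OLD = "tree_sitter_analyzer-1.9.2-py3-none-any.whl"
    NEW = "tree_sitter_analyzer-1.9.4-py3-none-any.whl"
    MEMBER = "tree_sitter_analyzer/languages/java_plugin.py"

    def read_member(wheel_path: str, member: str) -> list[str]:
        # Wheels are zip archives, so a module can be read without installing.
        with zipfile.ZipFile(wheel_path) as wheel:
            return wheel.read(member).decode("utf-8").splitlines(keepends=True)

    diff = difflib.unified_diff(
        read_member(OLD, MEMBER),
        read_member(NEW, MEMBER),
        fromfile=f"{OLD}/{MEMBER}",
        tofile=f"{NEW}/{MEMBER}",
    )
    print("".join(diff))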
tree_sitter_analyzer/languages/java_plugin.py

@@ -7,7 +7,7 @@ Migrated from AdvancedAnalyzer implementation for future independence.
 """
 
 import re
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any
 
 if TYPE_CHECKING:
     import tree_sitter
@@ -16,7 +16,7 @@ if TYPE_CHECKING:
     from ..models import AnalysisResult
 
 from ..encoding_utils import extract_text_slice, safe_encode
-from ..models import Class,
+from ..models import Class, Function, Import, Package, Variable
 from ..plugins.base import ElementExtractor, LanguagePlugin
 from ..utils import log_debug, log_error, log_warning
 
@@ -258,16 +258,13 @@ class JavaElementExtractor(ElementExtractor):
         packages: list[Package] = []
 
         # Extract package declaration
-        if tree
-
-
-
-
-
-
-            if package_info:
-                packages.append(package_info)
-                break  # Only one package declaration per file
+        if tree and tree.root_node:
+            for child in tree.root_node.children:
+                if child.type == "package_declaration":
+                    package_info = self._extract_package_element(child)
+                    if package_info:
+                        packages.append(package_info)
+                        break  # Only one package declaration per file
 
         log_debug(f"Extracted {len(packages)} packages")
         return packages
@@ -456,7 +453,8 @@ class JavaElementExtractor(ElementExtractor):
         if start_point[0] == end_point[0]:
             # Single line
             line = self.content_lines[start_point[0]]
-
+            result: str = line[start_point[1] : end_point[1]]
+            return result
         else:
             # Multiple lines
             lines = []
@@ -835,623 +833,435 @@ class JavaElementExtractor(ElementExtractor):
             log_debug(f"Failed to extract package element: {e}")
         except Exception as e:
             log_error(f"Unexpected error in package element extraction: {e}")
+
         return None
 
     def _extract_package_from_tree(self, tree: "tree_sitter.Tree") -> None:
-        """
-
-
-        This method ensures that package information is available for class extraction
-        regardless of the order in which extraction methods are called.
-        """
-        try:
-            # Look for package declaration in the root node's children
+        """Extract package information from tree when needed"""
+        if tree and tree.root_node:
             for child in tree.root_node.children:
                 if child.type == "package_declaration":
                     self._extract_package_info(child)
-                    break  # Only one package declaration per file
-        except Exception as e:
-            log_debug(f"Failed to extract package from tree: {e}")
-
-    def _determine_visibility(self, modifiers: list[str]) -> str:
-        """Determine visibility from modifiers"""
-        if "public" in modifiers:
-            return "public"
-        elif "private" in modifiers:
-            return "private"
-        elif "protected" in modifiers:
-            return "protected"
-        else:
-            return "package"  # Default package visibility
-
-    def _find_annotations_for_line_cached(
-        self, target_line: int
-    ) -> list[dict[str, Any]]:
-        """Find annotations for specified line with caching (from AdvancedAnalyzer)"""
-        if target_line in self._annotation_cache:
-            return self._annotation_cache[target_line]
-
-        result_annotations = []
-        for annotation in self.annotations:
-            line_distance = target_line - annotation.get("end_line", 0)
-            if 1 <= line_distance <= 5:
-                result_annotations.append(annotation)
-
-        self._annotation_cache[target_line] = result_annotations
-        return result_annotations
-
-    def _calculate_complexity_optimized(self, node: "tree_sitter.Node") -> int:
-        """Calculate cyclomatic complexity efficiently (from AdvancedAnalyzer)"""
-        complexity = 1
-        try:
-            node_text = self._get_node_text_optimized(node).lower()
-            keywords = ["if", "while", "for", "catch", "case", "switch"]
-            for keyword in keywords:
-                complexity += node_text.count(keyword)
-        except (AttributeError, TypeError) as e:
-            log_debug(f"Failed to calculate complexity: {e}")
-        except Exception as e:
-            log_error(f"Unexpected error in complexity calculation: {e}")
-        return complexity
-
-    def _extract_javadoc_for_line(self, target_line: int) -> str | None:
-        """Extract JavaDoc comment immediately before the specified line"""
-        try:
-            if not self.content_lines or target_line <= 1:
-                return None
-
-            # Search backwards from target_line
-            javadoc_lines = []
-            current_line = target_line - 1
-
-            # Skip empty lines
-            while current_line > 0:
-                line = self.content_lines[current_line - 1].strip()
-                if line:
                     break
-                current_line -= 1
-
-            # Check for JavaDoc end
-            if current_line > 0:
-                line = self.content_lines[current_line - 1].strip()
-                if line.endswith("*/"):
-                    # This might be a JavaDoc comment
-                    javadoc_lines.append(self.content_lines[current_line - 1])
-                    current_line -= 1
-
-            # Collect JavaDoc content
-            while current_line > 0:
-                line_content = self.content_lines[current_line - 1]
-                line_stripped = line_content.strip()
-                javadoc_lines.append(line_content)
-
-                if line_stripped.startswith("/**"):
-                    # Found the start of JavaDoc
-                    javadoc_lines.reverse()
-                    javadoc_text = "\n".join(javadoc_lines)
-
-                    # Clean up the JavaDoc
-                    return self._clean_javadoc(javadoc_text)
-                current_line -= 1
-
-            return None
-
-        except Exception as e:
-            log_debug(f"Failed to extract JavaDoc: {e}")
-            return None
-
-    def _clean_javadoc(self, javadoc_text: str) -> str:
-        """Clean JavaDoc text by removing comment markers"""
-        if not javadoc_text:
-            return ""
-
-        lines = javadoc_text.split("\n")
-        cleaned_lines = []
-
-        for line in lines:
-            # Remove leading/trailing whitespace
-            line = line.strip()
-
-            # Remove comment markers
-            if line.startswith("/**"):
-                line = line[3:].strip()
-            elif line.startswith("*/"):
-                line = line[2:].strip()
-            elif line.startswith("*"):
-                line = line[1:].strip()
-
-            if line:  # Only add non-empty lines
-                cleaned_lines.append(line)
-
-        return " ".join(cleaned_lines) if cleaned_lines else ""
-
-    def _is_nested_class(self, node: "tree_sitter.Node") -> bool:
-        """Check if this is a nested class (from AdvancedAnalyzer)"""
-        current = node.parent
-        while current:
-            if current.type in [
-                "class_declaration",
-                "interface_declaration",
-                "enum_declaration",
-            ]:
-                return True
-            current = current.parent
-        return False
-
-    def _find_parent_class(self, node: "tree_sitter.Node") -> str | None:
-        """Find parent class name (from AdvancedAnalyzer)"""
-        current = node.parent
-        while current:
-            if current.type in [
-                "class_declaration",
-                "interface_declaration",
-                "enum_declaration",
-            ]:
-                return self._extract_class_name(current)
-            current = current.parent
-        return None
-
-    def _extract_class_name(self, node: "tree_sitter.Node") -> str | None:
-        """Extract class name from node (from AdvancedAnalyzer)"""
-        for child in node.children:
-            if child.type == "identifier":
-                return self._get_node_text_optimized(child)
-        return None
-
-    def _extract_annotation_optimized(
-        self, node: "tree_sitter.Node"
-    ) -> dict[str, Any] | None:
-        """Extract annotation information optimized (from AdvancedAnalyzer)"""
-        try:
-            start_line = node.start_point[0] + 1
-            end_line = node.end_point[0] + 1
-            raw_text = self._get_node_text_optimized(node)
-
-            # Extract annotation name efficiently
-            name_match = re.search(r"@(\w+)", raw_text)
-            if not name_match:
-                return None
-
-            annotation_name = name_match.group(1)
-
-            # Extract parameters efficiently
-            parameters = []
-            param_match = re.search(r"\((.*?)\)", raw_text, re.DOTALL)
-            if param_match:
-                param_text = param_match.group(1).strip()
-                if param_text:
-                    # Simple parameter parsing
-                    if "=" in param_text:
-                        parameters = [
-                            p.strip() for p in re.split(r",(?![^()]*\))", param_text)
-                        ]
-                    else:
-                        parameters = [param_text]
-
-            return {
-                "name": annotation_name,
-                "parameters": parameters,
-                "start_line": start_line,
-                "end_line": end_line,
-                "raw_text": raw_text,
-            }
-        except (AttributeError, IndexError, ValueError) as e:
-            log_debug(f"Failed to extract annotation from node: {e}")
-            return None
-        except Exception as e:
-            log_error(f"Unexpected exception in annotation extraction: {e}")
-            return None
 
     def _extract_import_info(
         self, node: "tree_sitter.Node", source_code: str
     ) -> Import | None:
-        """Extract import information
+        """Extract import information from import declaration node"""
         try:
             import_text = self._get_node_text_optimized(node)
-
-            import_content = import_text.strip()
-            if import_content.endswith(";"):
-                import_content = import_content[:-1]
+            line_num = node.start_point[0] + 1
 
-
+            # Parse import statement
+            if "static" in import_text:
                 # Static import
-                static_match = re.search(r"import\s+static\s+([\w.]+)",
+                static_match = re.search(r"import\s+static\s+([\w.]+)", import_text)
                 if static_match:
                     import_name = static_match.group(1)
-
-                    if import_content.endswith(".*"):
+                    if import_text.endswith(".*"):
                         import_name = import_name.replace(".*", "")
 
-                    # For static imports, extract the class name
+                    # For static imports, extract the class name
                     parts = import_name.split(".")
                     if len(parts) > 1:
-                        # Remove the last part (method/field name) to get class name
                         import_name = ".".join(parts[:-1])
 
                     return Import(
                         name=import_name,
-                        start_line=
-                        end_line=
+                        start_line=line_num,
+                        end_line=line_num,
                         raw_text=import_text,
                         language="java",
                         module_name=import_name,
                         is_static=True,
-                        is_wildcard=
-                        import_statement=
+                        is_wildcard=import_text.endswith(".*"),
+                        import_statement=import_text,
                     )
             else:
                 # Normal import
-                normal_match = re.search(r"import\s+([\w.]+)",
+                normal_match = re.search(r"import\s+([\w.]+)", import_text)
                 if normal_match:
                     import_name = normal_match.group(1)
-
-                    if import_content.endswith(".*"):
+                    if import_text.endswith(".*"):
                         if import_name.endswith(".*"):
-                            import_name = import_name[:-2]
+                            import_name = import_name[:-2]
                         elif import_name.endswith("."):
-                            import_name = import_name[:-1]
+                            import_name = import_name[:-1]
 
                     return Import(
                         name=import_name,
-                        start_line=
-                        end_line=
+                        start_line=line_num,
+                        end_line=line_num,
                         raw_text=import_text,
                         language="java",
                         module_name=import_name,
                         is_static=False,
-                        is_wildcard=
-                        import_statement=
+                        is_wildcard=import_text.endswith(".*"),
+                        import_statement=import_text,
                     )
-        except (AttributeError, ValueError, IndexError) as e:
-            log_debug(f"Failed to extract import info: {e}")
         except Exception as e:
-
-            return None
+            log_debug(f"Failed to extract import info: {e}")
 
-
-        """Extract elements from source code using tree-sitter AST"""
-        elements = []
+        return None
 
+    def _extract_annotation_optimized(
+        self, node: "tree_sitter.Node"
+    ) -> dict[str, Any] | None:
+        """Extract annotation information optimized"""
         try:
-
-
-            elements.extend(self.extract_variables(tree, source_code))
-            elements.extend(self.extract_imports(tree, source_code))
-        except Exception as e:
-            log_error(f"Failed to extract elements: {e}")
-
-        return elements
-
-
-class JavaPlugin(LanguagePlugin):
-    """Java language plugin for the new architecture"""
+            annotation_text = self._get_node_text_optimized(node)
+            start_line = node.start_point[0] + 1
 
-
-
-
-
-
+            # Extract annotation name
+            annotation_name = None
+            for child in node.children:
+                if child.type == "identifier":
+                    annotation_name = self._get_node_text_optimized(child)
+                    break
 
-
-
-
-
+            if not annotation_name:
+                # Try to extract from text
+                match = re.search(r"@(\w+)", annotation_text)
+                if match:
+                    annotation_name = match.group(1)
+
+            if annotation_name:
+                return {
+                    "name": annotation_name,
+                    "line": start_line,
+                    "text": annotation_text,
+                    "type": "annotation",
+                }
+        except Exception as e:
+            log_debug(f"Failed to extract annotation: {e}")
 
-
-        """Return the name of the programming language this plugin supports"""
-        return "java"
+        return None
 
-    def
-        """
-
+    def _determine_visibility(self, modifiers: list[str]) -> str:
+        """Determine visibility from modifiers"""
+        if "public" in modifiers:
+            return "public"
+        elif "private" in modifiers:
+            return "private"
+        elif "protected" in modifiers:
+            return "protected"
+        else:
+            return "package"
 
-    def
-        """
-
+    def _find_annotations_for_line_cached(self, line: int) -> list[dict[str, Any]]:
+        """Find annotations for a specific line with caching"""
+        if line in self._annotation_cache:
+            return self._annotation_cache[line]
 
-
-
-
-
-
+        # Find annotations near this line
+        annotations = []
+        for annotation in self.annotations:
+            if abs(annotation.get("line", 0) - line) <= 2:
+                annotations.append(annotation)
 
-
-
-        if self._language_cache is None:
-            try:
-                import tree_sitter_java as tsjava
+        self._annotation_cache[line] = annotations
+        return annotations
 
-
-
-
-
-
-
-
-
+    def _is_nested_class(self, node: "tree_sitter.Node") -> bool:
+        """Check if this is a nested class"""
+        parent = node.parent
+        while parent:
+            if parent.type in [
+                "class_declaration",
+                "interface_declaration",
+                "enum_declaration",
+            ]:
+                return True
+            parent = parent.parent
+        return False
 
-    def
-        """
-
+    def _find_parent_class(self, node: "tree_sitter.Node") -> str | None:
+        """Find parent class name for nested classes"""
+        parent = node.parent
+        while parent:
+            if parent.type in [
+                "class_declaration",
+                "interface_declaration",
+                "enum_declaration",
+            ]:
+                for child in parent.children:
+                    if child.type == "identifier":
+                        return self._get_node_text_optimized(child)
+            parent = parent.parent
+        return None
 
-    def
-        """
-
-
-
-
+    def _calculate_complexity_optimized(self, node: "tree_sitter.Node") -> int:
+        """Calculate cyclomatic complexity optimized"""
+        complexity = 1  # Base complexity
+
+        # Count decision points
+        decision_nodes = [
+            "if_statement",
+            "while_statement",
+            "for_statement",
+            "switch_statement",
+            "catch_clause",
+            "conditional_expression",
+            "enhanced_for_statement",
+        ]
+
+        def count_decisions(n: "tree_sitter.Node") -> int:
+            count = 0
+            if hasattr(n, "type") and n.type in decision_nodes:
+                count += 1
+            if hasattr(n, "children"):
+                try:
+                    for child in n.children:
+                        count += count_decisions(child)
+                except (TypeError, AttributeError):
+                    # Handle Mock objects or other non-iterable children
+                    pass
+            return count
+
+        complexity += count_decisions(node)
+        return complexity
 
-    def
-        """
-
-
-
-
-
-
-
+    def _extract_javadoc_for_line(self, line: int) -> str | None:
+        """Extract JavaDoc comment for a specific line"""
+        try:
+            # Look for JavaDoc comment before the line
+            for i in range(max(0, line - 10), line):
+                if i < len(self.content_lines):
+                    line_content = self.content_lines[i].strip()
+                    if line_content.startswith("/**"):
+                        # Found start of JavaDoc, collect until */
+                        javadoc_lines = []
+                        for j in range(i, min(len(self.content_lines), line)):
+                            doc_line = self.content_lines[j].strip()
+                            javadoc_lines.append(doc_line)
+                            if doc_line.endswith("*/"):
+                                break
+                        return "\n".join(javadoc_lines)
+        except Exception as e:
+            log_debug(f"Failed to extract JavaDoc: {e}")
 
-
-        self, tree: "tree_sitter.Tree", source_code: str, query_key: str
-    ) -> list[dict]:
-        """
-        Execute query strategy for Java language
+        return None
 
-
-
-
-
+    def _extract_class_name(self, node: "tree_sitter.Node") -> str | None:
+        """Extract class name from a class declaration node."""
+        try:
+            for child in node.children:
+                if child.type == "identifier":
+                    return self._get_node_text_optimized(child)
+            return None
+        except Exception as e:
+            log_debug(f"Failed to extract class name: {e}")
+            return None
 
-        Returns:
-            List of query results
-        """
-        # Use the extractor to get elements based on query_key
-        extractor = self.get_extractor()
-
-        # Map query keys to extraction methods
-        if query_key in ["method", "methods", "function", "functions"]:
-            elements = extractor.extract_functions(tree, source_code)
-        elif query_key in ["class", "classes"]:
-            elements = extractor.extract_classes(tree, source_code)
-        elif query_key in ["field", "fields", "variable", "variables"]:
-            elements = extractor.extract_variables(tree, source_code)
-        elif query_key in ["import", "imports"]:
-            elements = extractor.extract_imports(tree, source_code)
-        elif query_key in ["package", "packages"]:
-            elements = extractor.extract_packages(tree, source_code)
-        elif query_key in ["annotation", "annotations"]:
-            elements = extractor.extract_annotations(tree, source_code)
-        else:
-            # For unknown query keys, return empty list
-            return []
-
-        # Convert elements to query result format
-        results = []
-        for element in elements:
-            result = {
-                "capture_name": query_key,
-                "node_type": self._get_node_type_for_element(element),
-                "start_line": element.start_line,
-                "end_line": element.end_line,
-                "text": element.raw_text,
-                "name": element.name,
-            }
-            results.append(result)
 
-
+class JavaPlugin(LanguagePlugin):
+    """Java language plugin implementation"""
 
-    def
-        """
-
+    def __init__(self) -> None:
+        """Initialize the Java language plugin."""
+        super().__init__()
+        self.extractor = JavaElementExtractor()
+        self.language = "java"  # Add language property for test compatibility
+        self.supported_extensions = (
+            self.get_file_extensions()
+        )  # Add for test compatibility
+        self._cached_language: Any | None = None  # Cache for tree-sitter language
 
-
-
-
-                if not element.is_constructor
-                else "constructor_declaration"
-            )
-        elif isinstance(element, Class):
-            if element.class_type == "interface":
-                return "interface_declaration"
-            elif element.class_type == "enum":
-                return "enum_declaration"
-            else:
-                return "class_declaration"
-        elif isinstance(element, Variable):
-            return "field_declaration"
-        elif isinstance(element, Import):
-            return "import_declaration"
-        elif isinstance(element, Package):
-            return "package_declaration"
-        else:
-            return "unknown"
+    def get_language_name(self) -> str:
+        """Get the language name."""
+        return "java"
 
-    def
-        """
-
+    def get_file_extensions(self) -> list[str]:
+        """Get supported file extensions."""
+        return [".java", ".jsp", ".jspx"]
 
-
-
-
-        return {
-            # Method-related queries
-            "method": ["method_declaration"],
-            "methods": ["method_declaration"],
-            "constructor": ["constructor_declaration"],
-            "constructors": ["constructor_declaration"],
-            # Class-related queries
-            "class": ["class_declaration"],
-            "classes": ["class_declaration"],
-            "interface": ["interface_declaration"],
-            "interfaces": ["interface_declaration"],
-            "enum": ["enum_declaration"],
-            "enums": ["enum_declaration"],
-            # Field-related queries
-            "field": ["field_declaration"],
-            "fields": ["field_declaration"],
-            # Import-related queries
-            "import": ["import_declaration"],
-            "imports": ["import_declaration"],
-            # Package-related queries
-            "package": ["package_declaration"],
-            "packages": ["package_declaration"],
-            # Annotation-related queries
-            "annotation": ["annotation", "marker_annotation"],
-            "annotations": ["annotation", "marker_annotation"],
-            # Generic queries
-            "all_elements": [
-                "method_declaration",
-                "constructor_declaration",
-                "class_declaration",
-                "interface_declaration",
-                "enum_declaration",
-                "field_declaration",
-                "import_declaration",
-                "package_declaration",
-                "annotation",
-                "marker_annotation",
-            ],
-        }
+    def create_extractor(self) -> ElementExtractor:
+        """Create a new element extractor instance."""
+        return JavaElementExtractor()
 
     async def analyze_file(
         self, file_path: str, request: "AnalysisRequest"
     ) -> "AnalysisResult":
-        """
-        Analyze a Java file and return analysis results.
+        """Analyze Java code and return structured results."""
 
-
-            file_path: Path to the Java file to analyze
-            request: Analysis request object
+        from ..models import AnalysisResult
 
-        Returns:
-            AnalysisResult object containing the analysis results
-        """
         try:
-
-            from ..
-
-            log_debug(f"Java Plugin: Starting analysis of {file_path}")
-
-            # Read file content
-            with open(file_path, encoding="utf-8") as f:
-                source_code = f.read()
-
-            log_debug(f"Java Plugin: Read {len(source_code)} characters from file")
+            # Read the file content using safe encoding detection
+            from ..encoding_utils import read_file_safe
 
-
-            parser = Parser()
-            parse_result = parser.parse_code(source_code, "java")
+            file_content, detected_encoding = read_file_safe(file_path)
 
-
-
-            if
-
+            # Get tree-sitter language and parse
+            language = self.get_tree_sitter_language()
+            if language is None:
+                # Return empty result if language loading fails
                 return AnalysisResult(
                     file_path=file_path,
                     language="java",
-                    line_count=len(
+                    line_count=len(file_content.split("\n")),
                     elements=[],
-
-                    query_results={},
-                    source_code=source_code,
-                    success=False,
-                    error_message=parse_result.error_message,
+                    source_code=file_content,
                 )
 
-            #
-
+            # Parse the code
+            import tree_sitter
+
+            parser = tree_sitter.Parser()
 
-
-
-
-
+            # Set language using the appropriate method
+            if hasattr(parser, "set_language"):
+                parser.set_language(language)
+            elif hasattr(parser, "language"):
+                parser.language = language
+            else:
+                # Try constructor approach as last resort
+                try:
+                    parser = tree_sitter.Parser(language)
+                except Exception as e:
+                    log_error(f"Failed to create parser with language: {e}")
+                    return AnalysisResult(
+                        file_path=file_path,
+                        language="java",
+                        line_count=len(file_content.split("\n")),
+                        elements=[],
+                        source_code=file_content,
+                        error_message=f"Parser creation failed: {e}",
+                        success=False,
+                    )
 
-
-            functions = extractor.extract_functions(parse_result.tree, source_code)
-            log_debug(f"Java Plugin: Found {len(functions)} functions")
+            tree = parser.parse(file_content.encode("utf-8"))
 
-
-
-            log_debug(f"Java Plugin: Found {len(classes)} classes")
+            # Extract elements using our extractor
+            elements_dict = self.extract_elements(tree, file_content)
 
-
-
-
+            # Combine all elements into a single list
+            all_elements = []
+            all_elements.extend(elements_dict.get("functions", []))
+            all_elements.extend(elements_dict.get("classes", []))
+            all_elements.extend(elements_dict.get("variables", []))
+            all_elements.extend(elements_dict.get("imports", []))
+            all_elements.extend(elements_dict.get("packages", []))
 
-
-
-
-
-
-
-            classes = []
-            variables = []
-            imports = []
-
-            # Combine all elements
-            all_elements: list[CodeElement] = []
-            all_elements.extend(packages)
-            all_elements.extend(functions)
-            all_elements.extend(classes)
-            all_elements.extend(variables)
-            all_elements.extend(imports)
-            log_debug(f"Java Plugin: Total elements: {len(all_elements)}")
+            # Get package info if available
+            packages = elements_dict.get("packages", [])
+            package = packages[0] if packages else None
+
+            # Count nodes in the AST tree
+            node_count = self._count_tree_nodes(tree.root_node) if tree and tree.root_node else 0
 
             return AnalysisResult(
                 file_path=file_path,
                 language="java",
-                line_count=len(
+                line_count=len(file_content.split("\n")),
                 elements=all_elements,
-                node_count=
-
-
-                query_results={},
-                source_code=source_code,
-                success=True,
-                error_message=None,
+                node_count=node_count,
+                source_code=file_content,
+                package=package,
             )
 
         except Exception as e:
-            log_error(f"
-
-
-            log_error(f"Java Plugin traceback: {traceback.format_exc()}")
+            log_error(f"Error analyzing Java file {file_path}: {e}")
+            # Return empty result on error
             return AnalysisResult(
                 file_path=file_path,
                 language="java",
                 line_count=0,
                 elements=[],
-                node_count=0,
-                query_results={},
                 source_code="",
-                success=False,
                 error_message=str(e),
+                success=False,
             )
 
-    def
-
-
-
-
+    def _count_tree_nodes(self, node: Any) -> int:
+        """
+        Recursively count nodes in the AST tree.
+
+        Args:
+            node: Tree-sitter node
+
+        Returns:
+            Total number of nodes
+        """
+        if node is None:
+            return 0
+
+        count = 1  # Count current node
+        if hasattr(node, "children"):
+            for child in node.children:
+                count += self._count_tree_nodes(child)
+        return count
+
+    def get_tree_sitter_language(self) -> Any | None:
+        """Get the tree-sitter language for Java."""
+        if self._cached_language is not None:
+            return self._cached_language
+
+        try:
+            import tree_sitter
+            import tree_sitter_java
+
+            # Get the language function result
+            caps_or_lang = tree_sitter_java.language()
+
+            # Convert to proper Language object if needed
+            if hasattr(caps_or_lang, "__class__") and "Language" in str(
+                type(caps_or_lang)
+            ):
+                # Already a Language object
+                self._cached_language = caps_or_lang
+            else:
+                # PyCapsule - convert to Language object
+                try:
+                    # Use modern tree-sitter API - PyCapsule should be passed to Language constructor
+                    self._cached_language = tree_sitter.Language(caps_or_lang)
+                except Exception as e:
+                    log_error(f"Failed to create Language object from PyCapsule: {e}")
+                    return None
+
+            return self._cached_language
+        except ImportError as e:
+            log_error(f"tree-sitter-java not available: {e}")
+            return None
+        except Exception as e:
+            log_error(f"Failed to load tree-sitter language for Java: {e}")
+            return None
+
+    def extract_elements(self, tree: Any | None, source_code: str) -> dict[str, Any]:
+        """Extract all elements from Java code for test compatibility."""
+        if tree is None:
             return {
-                "packages": [],
                 "functions": [],
                 "classes": [],
                 "variables": [],
                 "imports": [],
+                "packages": [],
                 "annotations": [],
             }
 
-
-
-
-
-
-
-
-
-
-
-
+        try:
+            extractor = self.create_extractor()
+            return {
+                "functions": extractor.extract_functions(tree, source_code),
+                "classes": extractor.extract_classes(tree, source_code),
+                "variables": extractor.extract_variables(tree, source_code),
+                "imports": extractor.extract_imports(tree, source_code),
+                "packages": extractor.extract_packages(tree, source_code),
+                "annotations": extractor.extract_annotations(tree, source_code),
+            }
+        except Exception as e:
+            log_error(f"Error extracting elements: {e}")
+            return {
+                "functions": [],
+                "classes": [],
+                "variables": [],
+                "imports": [],
+                "packages": [],
+                "annotations": [],
+            }
 
-
+    def supports_file(self, file_path: str) -> bool:
+        """Check if this plugin supports the given file."""
+        return any(
+            file_path.lower().endswith(ext) for ext in self.get_file_extensions()
+        )