tree-sitter-analyzer 1.8.4-py3-none-any.whl → 1.9.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (64)
  1. tree_sitter_analyzer/__init__.py +1 -1
  2. tree_sitter_analyzer/api.py +4 -4
  3. tree_sitter_analyzer/cli/argument_validator.py +29 -17
  4. tree_sitter_analyzer/cli/commands/advanced_command.py +7 -5
  5. tree_sitter_analyzer/cli/commands/structure_command.py +7 -5
  6. tree_sitter_analyzer/cli/commands/summary_command.py +10 -6
  7. tree_sitter_analyzer/cli/commands/table_command.py +8 -7
  8. tree_sitter_analyzer/cli/info_commands.py +1 -1
  9. tree_sitter_analyzer/cli_main.py +3 -2
  10. tree_sitter_analyzer/core/analysis_engine.py +5 -5
  11. tree_sitter_analyzer/core/cache_service.py +3 -1
  12. tree_sitter_analyzer/core/query.py +17 -5
  13. tree_sitter_analyzer/core/query_service.py +1 -1
  14. tree_sitter_analyzer/encoding_utils.py +3 -3
  15. tree_sitter_analyzer/exceptions.py +61 -50
  16. tree_sitter_analyzer/file_handler.py +3 -0
  17. tree_sitter_analyzer/formatters/base_formatter.py +10 -5
  18. tree_sitter_analyzer/formatters/formatter_registry.py +83 -68
  19. tree_sitter_analyzer/formatters/html_formatter.py +90 -54
  20. tree_sitter_analyzer/formatters/javascript_formatter.py +21 -16
  21. tree_sitter_analyzer/formatters/language_formatter_factory.py +7 -6
  22. tree_sitter_analyzer/formatters/markdown_formatter.py +247 -124
  23. tree_sitter_analyzer/formatters/python_formatter.py +61 -38
  24. tree_sitter_analyzer/formatters/typescript_formatter.py +113 -45
  25. tree_sitter_analyzer/interfaces/mcp_server.py +2 -2
  26. tree_sitter_analyzer/language_detector.py +6 -6
  27. tree_sitter_analyzer/language_loader.py +3 -1
  28. tree_sitter_analyzer/languages/css_plugin.py +120 -61
  29. tree_sitter_analyzer/languages/html_plugin.py +159 -62
  30. tree_sitter_analyzer/languages/java_plugin.py +42 -34
  31. tree_sitter_analyzer/languages/javascript_plugin.py +59 -30
  32. tree_sitter_analyzer/languages/markdown_plugin.py +402 -368
  33. tree_sitter_analyzer/languages/python_plugin.py +111 -64
  34. tree_sitter_analyzer/languages/typescript_plugin.py +241 -132
  35. tree_sitter_analyzer/mcp/server.py +22 -18
  36. tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py +13 -8
  37. tree_sitter_analyzer/mcp/tools/base_tool.py +2 -2
  38. tree_sitter_analyzer/mcp/tools/fd_rg_utils.py +232 -26
  39. tree_sitter_analyzer/mcp/tools/find_and_grep_tool.py +31 -23
  40. tree_sitter_analyzer/mcp/tools/list_files_tool.py +21 -19
  41. tree_sitter_analyzer/mcp/tools/query_tool.py +17 -18
  42. tree_sitter_analyzer/mcp/tools/read_partial_tool.py +30 -31
  43. tree_sitter_analyzer/mcp/tools/search_content_tool.py +131 -77
  44. tree_sitter_analyzer/mcp/tools/table_format_tool.py +29 -16
  45. tree_sitter_analyzer/mcp/utils/file_output_factory.py +64 -51
  46. tree_sitter_analyzer/mcp/utils/file_output_manager.py +34 -24
  47. tree_sitter_analyzer/mcp/utils/gitignore_detector.py +8 -4
  48. tree_sitter_analyzer/models.py +7 -5
  49. tree_sitter_analyzer/plugins/base.py +9 -7
  50. tree_sitter_analyzer/plugins/manager.py +1 -0
  51. tree_sitter_analyzer/queries/css.py +2 -21
  52. tree_sitter_analyzer/queries/html.py +2 -15
  53. tree_sitter_analyzer/queries/markdown.py +30 -41
  54. tree_sitter_analyzer/queries/python.py +20 -5
  55. tree_sitter_analyzer/query_loader.py +5 -5
  56. tree_sitter_analyzer/security/validator.py +114 -86
  57. tree_sitter_analyzer/utils/__init__.py +58 -28
  58. tree_sitter_analyzer/utils/tree_sitter_compat.py +72 -65
  59. tree_sitter_analyzer/utils.py +26 -15
  60. {tree_sitter_analyzer-1.8.4.dist-info → tree_sitter_analyzer-1.9.0.dist-info}/METADATA +1 -1
  61. tree_sitter_analyzer-1.9.0.dist-info/RECORD +109 -0
  62. tree_sitter_analyzer-1.8.4.dist-info/RECORD +0 -109
  63. {tree_sitter_analyzer-1.8.4.dist-info → tree_sitter_analyzer-1.9.0.dist-info}/WHEEL +0 -0
  64. {tree_sitter_analyzer-1.8.4.dist-info → tree_sitter_analyzer-1.9.0.dist-info}/entry_points.txt +0 -0

tree_sitter_analyzer/languages/html_plugin.py

@@ -8,7 +8,7 @@ attribute parsing, and document structure analysis.
 """
 
 import logging
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING
 
 from ..models import AnalysisResult, MarkupElement
 from ..plugins.base import ElementExtractor, LanguagePlugin
@@ -28,14 +28,80 @@ class HtmlElementExtractor(ElementExtractor):
     def __init__(self):
         self.element_categories = {
             # HTML要素の分類システム
-            "structure": ["html", "body", "div", "span", "section", "article", "aside", "nav", "main", "header", "footer"],
+            "structure": [
+                "html",
+                "body",
+                "div",
+                "span",
+                "section",
+                "article",
+                "aside",
+                "nav",
+                "main",
+                "header",
+                "footer",
+            ],
             "heading": ["h1", "h2", "h3", "h4", "h5", "h6"],
-            "text": ["p", "a", "strong", "em", "b", "i", "u", "small", "mark", "del", "ins", "sub", "sup"],
+            "text": [
+                "p",
+                "a",
+                "strong",
+                "em",
+                "b",
+                "i",
+                "u",
+                "small",
+                "mark",
+                "del",
+                "ins",
+                "sub",
+                "sup",
+            ],
             "list": ["ul", "ol", "li", "dl", "dt", "dd"],
-            "media": ["img", "video", "audio", "source", "track", "canvas", "svg", "picture"],
-            "form": ["form", "input", "textarea", "button", "select", "option", "optgroup", "label", "fieldset", "legend"],
-            "table": ["table", "thead", "tbody", "tfoot", "tr", "td", "th", "caption", "colgroup", "col"],
-            "metadata": ["head", "title", "meta", "link", "style", "script", "noscript", "base"]
+            "media": [
+                "img",
+                "video",
+                "audio",
+                "source",
+                "track",
+                "canvas",
+                "svg",
+                "picture",
+            ],
+            "form": [
+                "form",
+                "input",
+                "textarea",
+                "button",
+                "select",
+                "option",
+                "optgroup",
+                "label",
+                "fieldset",
+                "legend",
+            ],
+            "table": [
+                "table",
+                "thead",
+                "tbody",
+                "tfoot",
+                "tr",
+                "td",
+                "th",
+                "caption",
+                "colgroup",
+                "col",
+            ],
+            "metadata": [
+                "head",
+                "title",
+                "meta",
+                "link",
+                "style",
+                "script",
+                "noscript",
+                "base",
+            ],
         }
 
     def extract_functions(self, tree: "tree_sitter.Tree", source_code: str) -> list:
@@ -54,13 +120,17 @@ class HtmlElementExtractor(ElementExtractor):
         """HTML doesn't have imports, return empty list"""
         return []
 
-    def extract_html_elements(self, tree: "tree_sitter.Tree", source_code: str) -> list[MarkupElement]:
+    def extract_html_elements(
+        self, tree: "tree_sitter.Tree", source_code: str
+    ) -> list[MarkupElement]:
         """Extract HTML elements using tree-sitter-html parser"""
         elements = []
-
+
         try:
             if hasattr(tree, "root_node"):
-                self._traverse_for_html_elements(tree.root_node, elements, source_code, None)
+                self._traverse_for_html_elements(
+                    tree.root_node, elements, source_code, None
+                )
         except Exception as e:
             log_error(f"Error in HTML element extraction: {e}")
 
@@ -71,7 +141,7 @@ class HtmlElementExtractor(ElementExtractor):
         node: "tree_sitter.Node",
         elements: list[MarkupElement],
         source_code: str,
-        parent: MarkupElement | None
+        parent: MarkupElement | None,
     ) -> None:
         """Traverse tree to find HTML elements using tree-sitter-html grammar"""
         if hasattr(node, "type") and self._is_html_element_node(node.type):
@@ -79,11 +149,13 @@ class HtmlElementExtractor(ElementExtractor):
                 element = self._create_markup_element(node, source_code, parent)
                 if element:
                     elements.append(element)
-
+
                     # Process children with this element as parent
                     if hasattr(node, "children"):
                         for child in node.children:
-                            self._traverse_for_html_elements(child, elements, source_code, element)
+                            self._traverse_for_html_elements(
+                                child, elements, source_code, element
+                            )
                     return
             except Exception as e:
                 log_debug(f"Failed to extract HTML element: {e}")
@@ -101,15 +173,12 @@ class HtmlElementExtractor(ElementExtractor):
             "self_closing_tag",
             "script_element",
             "style_element",
-            "void_element"
+            "void_element",
         ]
         return node_type in html_element_types
 
     def _create_markup_element(
-        self,
-        node: "tree_sitter.Node",
-        source_code: str,
-        parent: MarkupElement | None
+        self, node: "tree_sitter.Node", source_code: str, parent: MarkupElement | None
     ) -> MarkupElement | None:
         """Create MarkupElement from tree-sitter node using tree-sitter-html grammar"""
         try:
@@ -130,7 +199,9 @@ class HtmlElementExtractor(ElementExtractor):
             # Create MarkupElement
             element = MarkupElement(
                 name=tag_name,
-                start_line=node.start_point[0] + 1 if hasattr(node, "start_point") else 0,
+                start_line=node.start_point[0] + 1
+                if hasattr(node, "start_point")
+                else 0,
                 end_line=node.end_point[0] + 1 if hasattr(node, "end_point") else 0,
                 raw_text=raw_text,
                 language="html",
@@ -138,7 +209,7 @@ class HtmlElementExtractor(ElementExtractor):
                 attributes=attributes,
                 parent=parent,
                 children=[],
-                element_class=element_class
+                element_class=element_class,
             )
 
             # Add to parent's children if parent exists
@@ -164,38 +235,52 @@ class HtmlElementExtractor(ElementExtractor):
                         elif child.type in ["start_tag", "self_closing_tag"]:
                             # Look for tag_name within start_tag or self_closing_tag
                             for grandchild in child.children:
-                                if hasattr(grandchild, "type") and grandchild.type == "tag_name":
-                                    return self._extract_node_text(grandchild, source_code).strip()
-
+                                if (
+                                    hasattr(grandchild, "type")
+                                    and grandchild.type == "tag_name"
+                                ):
+                                    return self._extract_node_text(
+                                        grandchild, source_code
+                                    ).strip()
+
             # Fallback: try to extract from node text
             node_text = self._extract_node_text(node, source_code)
             if node_text.startswith("<"):
                 # Extract tag name from <tagname ...> pattern
                 tag_part = node_text.split(">")[0].split()[0]
                 return tag_part.lstrip("<").rstrip(">")
-
+
             return "unknown"
         except Exception:
             return "unknown"
 
-    def _extract_attributes(self, node: "tree_sitter.Node", source_code: str) -> dict[str, str]:
+    def _extract_attributes(
+        self, node: "tree_sitter.Node", source_code: str
+    ) -> dict[str, str]:
         """Extract attributes from HTML element node using tree-sitter-html grammar"""
         attributes = {}
-
+
         try:
             if hasattr(node, "children"):
                 for child in node.children:
                     if hasattr(child, "type"):
                         # Handle attribute nodes in tree-sitter-html
                         if child.type == "attribute":
-                            attr_name, attr_value = self._parse_attribute(child, source_code)
+                            attr_name, attr_value = self._parse_attribute(
+                                child, source_code
+                            )
                             if attr_name:
                                 attributes[attr_name] = attr_value
                         elif child.type in ["start_tag", "self_closing_tag"]:
                             # Look for attributes within start_tag or self_closing_tag
                             for grandchild in child.children:
-                                if hasattr(grandchild, "type") and grandchild.type == "attribute":
-                                    attr_name, attr_value = self._parse_attribute(grandchild, source_code)
+                                if (
+                                    hasattr(grandchild, "type")
+                                    and grandchild.type == "attribute"
+                                ):
+                                    attr_name, attr_value = self._parse_attribute(
+                                        grandchild, source_code
+                                    )
                                     if attr_name:
                                         attributes[attr_name] = attr_value
         except Exception as e:
@@ -203,23 +288,34 @@ class HtmlElementExtractor(ElementExtractor):
 
         return attributes
 
-    def _parse_attribute(self, attr_node: "tree_sitter.Node", source_code: str) -> tuple[str, str]:
+    def _parse_attribute(
+        self, attr_node: "tree_sitter.Node", source_code: str
+    ) -> tuple[str, str]:
        """Parse individual attribute node using tree-sitter-html grammar"""
        try:
            # In tree-sitter-html, attributes have specific structure
            attr_name = ""
            attr_value = ""
-
+
            if hasattr(attr_node, "children"):
                for child in attr_node.children:
                    if hasattr(child, "type"):
                        if child.type == "attribute_name":
-                            attr_name = self._extract_node_text(child, source_code).strip()
+                            attr_name = self._extract_node_text(
+                                child, source_code
+                            ).strip()
                        elif child.type == "quoted_attribute_value":
-                            attr_value = self._extract_node_text(child, source_code).strip().strip('"').strip("'")
+                            attr_value = (
+                                self._extract_node_text(child, source_code)
+                                .strip()
+                                .strip('"')
+                                .strip("'")
+                            )
                        elif child.type == "attribute_value":
-                            attr_value = self._extract_node_text(child, source_code).strip()
-
+                            attr_value = self._extract_node_text(
+                                child, source_code
+                            ).strip()
+
            # Fallback to simple parsing
            if not attr_name:
                attr_text = self._extract_node_text(attr_node, source_code)
@@ -231,7 +327,7 @@ class HtmlElementExtractor(ElementExtractor):
                    # Boolean attribute
                    attr_name = attr_text.strip()
                    attr_value = ""
-
+
            return attr_name, attr_value
        except Exception:
            return "", ""
@@ -239,11 +335,11 @@ class HtmlElementExtractor(ElementExtractor):
     def _classify_element(self, tag_name: str) -> str:
         """Classify HTML element based on tag name"""
         tag_name_lower = tag_name.lower()
-
+
         for category, tags in self.element_categories.items():
             if tag_name_lower in tags:
                 return category
-
+
         return "unknown"
 
     def _extract_node_text(self, node: "tree_sitter.Node", source_code: str) -> str:
@@ -277,13 +373,16 @@ class HtmlPlugin(LanguagePlugin):
     def get_queries(self) -> dict[str, str]:
         """Return HTML-specific tree-sitter queries"""
         from ..queries.html import HTML_QUERIES
+
         return HTML_QUERIES
 
-    def execute_query_strategy(self, query_key: str | None, language: str) -> str | None:
+    def execute_query_strategy(
+        self, query_key: str | None, language: str
+    ) -> str | None:
         """Execute query strategy for HTML"""
         if language != "html":
             return None
-
+
         queries = self.get_queries()
         return queries.get(query_key) if query_key else None
 
@@ -297,44 +396,40 @@ class HtmlPlugin(LanguagePlugin):
             "media": ["element"],
             "form": ["element"],
             "table": ["element"],
-            "metadata": ["element"]
+            "metadata": ["element"],
         }
 
     async def analyze_file(
         self, file_path: str, request: "AnalysisRequest"
     ) -> "AnalysisResult":
         """Analyze HTML file using tree-sitter-html parser"""
-        from ..core.analysis_engine import UnifiedAnalysisEngine
         from ..encoding_utils import read_file_safe
 
         try:
             # Read file content
             content, encoding = read_file_safe(file_path)
-
-            # Create analysis engine
-            engine = UnifiedAnalysisEngine()
-
+
             # Use tree-sitter-html for parsing
             try:
-                import tree_sitter_html as ts_html
                 import tree_sitter
-
+                import tree_sitter_html as ts_html
+
                 # Get HTML language
                 HTML_LANGUAGE = tree_sitter.Language(ts_html.language())
-
+
                 # Create parser
                 parser = tree_sitter.Parser()
                 parser.language = HTML_LANGUAGE
-
+
                 # Parse the HTML content
-                tree = parser.parse(content.encode('utf-8'))
-
+                tree = parser.parse(content.encode("utf-8"))
+
                 # Extract elements using the extractor
                 extractor = self.create_extractor()
                 elements = extractor.extract_html_elements(tree, content)
-
+
                 log_info(f"Extracted {len(elements)} HTML elements from {file_path}")
-
+
                 return AnalysisResult(
                     file_path=file_path,
                     language="html",
@@ -344,15 +439,17 @@ class HtmlPlugin(LanguagePlugin):
                     query_results={},
                     source_code=content,
                     success=True,
-                    error_message=None
+                    error_message=None,
                 )
-
+
             except ImportError:
-                log_error("tree-sitter-html not available, falling back to basic parsing")
+                log_error(
+                    "tree-sitter-html not available, falling back to basic parsing"
+                )
                 # Fallback to basic parsing
                 lines = content.splitlines()
                 line_count = len(lines)
-
+
                 # Create basic MarkupElement for the HTML document
                 html_element = MarkupElement(
                     name="html",
@@ -364,7 +461,7 @@ class HtmlPlugin(LanguagePlugin):
                     attributes={},
                     parent=None,
                     children=[],
-                    element_class="structure"
+                    element_class="structure",
                 )
                 elements = [html_element]
 
@@ -377,7 +474,7 @@ class HtmlPlugin(LanguagePlugin):
                     query_results={},
                     source_code=content,
                     success=True,
-                    error_message=None
+                    error_message=None,
                 )
 
         except Exception as e:
@@ -391,5 +488,5 @@ class HtmlPlugin(LanguagePlugin):
                 query_results={},
                 source_code="",
                 success=False,
-                error_message=str(e)
-            )
+                error_message=str(e),
+            )
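
The new analyze_file path above drops the UnifiedAnalysisEngine instantiation and parses the file directly with tree-sitter-html. A minimal standalone sketch of that parsing path follows (it assumes the tree_sitter and tree_sitter_html packages are installed; the sample HTML string and the walk helper are illustrative and not part of the package):

    import tree_sitter
    import tree_sitter_html as ts_html

    # Build the HTML grammar and a parser, mirroring HtmlPlugin.analyze_file
    HTML_LANGUAGE = tree_sitter.Language(ts_html.language())
    parser = tree_sitter.Parser()
    parser.language = HTML_LANGUAGE

    # Illustrative input; any HTML document works
    source = "<html><body><p class='intro'>Hello</p></body></html>"
    tree = parser.parse(source.encode("utf-8"))

    # Walk the tree roughly the way HtmlElementExtractor does,
    # printing the node types it treats as HTML elements
    def walk(node, depth=0):
        if node.type in ("element", "script_element", "style_element", "self_closing_tag"):
            print("  " * depth + node.type)
        for child in node.children:
            walk(child, depth + 1)

    walk(tree.root_node)
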
tree_sitter_analyzer/languages/java_plugin.py

@@ -201,7 +201,7 @@ class JavaElementExtractor(ElementExtractor):
                 import_name = static_match.group(1)
                 if import_content.endswith(".*"):
                     import_name = import_name.replace(".*", "")
-
+
                 # For static imports, extract the class name (remove method/field name)
                 parts = import_name.split(".")
                 if len(parts) > 1:
@@ -261,7 +261,7 @@ class JavaElementExtractor(ElementExtractor):
         if tree is None or tree.root_node is None:
             log_debug("Tree or root_node is None, returning empty packages list")
             return packages
-
+
         for child in tree.root_node.children:
             if child.type == "package_declaration":
                 package_info = self._extract_package_element(child)
@@ -1061,7 +1061,7 @@ class JavaElementExtractor(ElementExtractor):
             # Handle wildcard case
             if import_content.endswith(".*"):
                 import_name = import_name.replace(".*", "")
-
+
             # For static imports, extract the class name (remove method/field name)
             parts = import_name.split(".")
             if len(parts) > 1:
@@ -1111,7 +1111,7 @@ class JavaElementExtractor(ElementExtractor):
     def extract_elements(self, tree: "tree_sitter.Tree", source_code: str) -> list:
         """Extract elements from source code using tree-sitter AST"""
         elements = []
-
+
         try:
             elements.extend(self.extract_functions(tree, source_code))
             elements.extend(self.extract_classes(tree, source_code))
@@ -1119,7 +1119,7 @@ class JavaElementExtractor(ElementExtractor):
             elements.extend(self.extract_imports(tree, source_code))
         except Exception as e:
             log_error(f"Failed to extract elements: {e}")
-
+
         return elements
 
 
@@ -1130,8 +1130,8 @@ class JavaPlugin(LanguagePlugin):
         """Initialize the Java plugin"""
         super().__init__()
         self._language_cache: tree_sitter.Language | None = None
-        self._extractor: Optional[JavaElementExtractor] = None
-
+        self._extractor: JavaElementExtractor | None = None
+
         # Legacy attributes for backward compatibility with tests
         self.language = "java"
         self.extractor = self.create_extractor()
@@ -1191,21 +1191,23 @@ class JavaPlugin(LanguagePlugin):
             "supported_queries": self.get_supported_queries(),
         }
 
-    def execute_query_strategy(self, tree: "tree_sitter.Tree", source_code: str, query_key: str) -> list[dict]:
+    def execute_query_strategy(
+        self, tree: "tree_sitter.Tree", source_code: str, query_key: str
+    ) -> list[dict]:
         """
         Execute query strategy for Java language
-
+
         Args:
             tree: Tree-sitter tree object
             source_code: Source code string
             query_key: Query key to execute
-
+
         Returns:
             List of query results
         """
         # Use the extractor to get elements based on query_key
         extractor = self.get_extractor()
-
+
         # Map query keys to extraction methods
         if query_key in ["method", "methods", "function", "functions"]:
             elements = extractor.extract_functions(tree, source_code)
@@ -1222,7 +1224,7 @@ class JavaPlugin(LanguagePlugin):
         else:
             # For unknown query keys, return empty list
             return []
-
+
         # Convert elements to query result format
         results = []
         for element in elements:
@@ -1235,15 +1237,19 @@ class JavaPlugin(LanguagePlugin):
                 "name": element.name,
             }
             results.append(result)
-
+
         return results
-
+
     def _get_node_type_for_element(self, element) -> str:
         """Get appropriate node type for element"""
-        from ..models import Function, Class, Variable, Import, Package
-
+        from ..models import Class, Function, Import, Package, Variable
+
         if isinstance(element, Function):
-            return "method_declaration" if not element.is_constructor else "constructor_declaration"
+            return (
+                "method_declaration"
+                if not element.is_constructor
+                else "constructor_declaration"
+            )
         elif isinstance(element, Class):
             if element.class_type == "interface":
                 return "interface_declaration"
@@ -1263,7 +1269,7 @@ class JavaPlugin(LanguagePlugin):
     def get_element_categories(self) -> dict[str, list[str]]:
         """
         Get element categories mapping query keys to node types
-
+
         Returns:
             Dictionary mapping query keys to lists of node types
         """
@@ -1273,7 +1279,6 @@ class JavaPlugin(LanguagePlugin):
             "methods": ["method_declaration"],
             "constructor": ["constructor_declaration"],
             "constructors": ["constructor_declaration"],
-
             # Class-related queries
             "class": ["class_declaration"],
             "classes": ["class_declaration"],
@@ -1281,29 +1286,30 @@ class JavaPlugin(LanguagePlugin):
             "interfaces": ["interface_declaration"],
             "enum": ["enum_declaration"],
             "enums": ["enum_declaration"],
-
             # Field-related queries
             "field": ["field_declaration"],
             "fields": ["field_declaration"],
-
             # Import-related queries
             "import": ["import_declaration"],
             "imports": ["import_declaration"],
-
             # Package-related queries
             "package": ["package_declaration"],
             "packages": ["package_declaration"],
-
             # Annotation-related queries
             "annotation": ["annotation", "marker_annotation"],
             "annotations": ["annotation", "marker_annotation"],
-
             # Generic queries
             "all_elements": [
-                "method_declaration", "constructor_declaration",
-                "class_declaration", "interface_declaration", "enum_declaration",
-                "field_declaration", "import_declaration", "package_declaration",
-                "annotation", "marker_annotation"
+                "method_declaration",
+                "constructor_declaration",
+                "class_declaration",
+                "interface_declaration",
+                "enum_declaration",
+                "field_declaration",
+                "import_declaration",
+                "package_declaration",
+                "annotation",
+                "marker_annotation",
             ],
         }
 
@@ -1422,7 +1428,9 @@ class JavaPlugin(LanguagePlugin):
                 error_message=str(e),
             )
 
-    def extract_elements(self, tree: "tree_sitter.Tree", source_code: str) -> dict[str, list[CodeElement]]:
+    def extract_elements(
+        self, tree: "tree_sitter.Tree", source_code: str
+    ) -> dict[str, list[CodeElement]]:
         """Legacy method for backward compatibility with tests"""
         if not tree or not tree.root_node:
             return {
@@ -1431,11 +1439,11 @@ class JavaPlugin(LanguagePlugin):
                 "classes": [],
                 "variables": [],
                 "imports": [],
-                "annotations": []
+                "annotations": [],
             }
-
+
         extractor = self.create_extractor()
-
+
         # Extract all types of elements and return as dictionary
         result = {
             "packages": extractor.extract_packages(tree, source_code),
@@ -1443,7 +1451,7 @@ class JavaPlugin(LanguagePlugin):
             "classes": extractor.extract_classes(tree, source_code),
             "variables": extractor.extract_variables(tree, source_code),
             "imports": extractor.extract_imports(tree, source_code),
-            "annotations": extractor.extract_annotations(tree, source_code)
+            "annotations": extractor.extract_annotations(tree, source_code),
         }
-
+
         return result
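
The java_plugin.py changes above are mostly mechanical reformatting (wrapped signatures, trailing commas, import reordering) plus a typing update from Optional[JavaElementExtractor] to JavaElementExtractor | None; the legacy extract_elements method still returns a dict of element lists keyed by category. A hedged sketch of driving it end to end (the tree_sitter_java grammar package, the sample Java snippet, and the direct JavaPlugin construction are assumptions for illustration, not taken from this diff):

    import tree_sitter
    import tree_sitter_java as ts_java
    from tree_sitter_analyzer.languages.java_plugin import JavaPlugin

    # Wire up the Java grammar the same way the HTML example above does
    JAVA_LANGUAGE = tree_sitter.Language(ts_java.language())
    parser = tree_sitter.Parser()
    parser.language = JAVA_LANGUAGE

    source = "package demo; public class Hello { void run() {} }"
    tree = parser.parse(source.encode("utf-8"))

    # Legacy dict-of-lists API kept for backward compatibility with tests
    plugin = JavaPlugin()
    elements = plugin.extract_elements(tree, source)
    for category, items in elements.items():
        print(category, [getattr(item, "name", None) for item in items])
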