tree-sitter-analyzer 0.1.2__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tree-sitter-analyzer might be problematic.
- tree_sitter_analyzer/__init__.py +1 -1
- tree_sitter_analyzer/cli/commands/advanced_command.py +10 -10
- tree_sitter_analyzer/cli/commands/base_command.py +11 -11
- tree_sitter_analyzer/cli/commands/partial_read_command.py +12 -12
- tree_sitter_analyzer/cli/commands/structure_command.py +13 -13
- tree_sitter_analyzer/cli/commands/summary_command.py +8 -8
- tree_sitter_analyzer/cli_main.py +6 -6
- tree_sitter_analyzer/mcp/tools/read_partial_tool.py +5 -5
- {tree_sitter_analyzer-0.1.2.dist-info → tree_sitter_analyzer-0.1.3.dist-info}/METADATA +12 -12
- {tree_sitter_analyzer-0.1.2.dist-info → tree_sitter_analyzer-0.1.3.dist-info}/RECORD +12 -12
- {tree_sitter_analyzer-0.1.2.dist-info → tree_sitter_analyzer-0.1.3.dist-info}/WHEEL +0 -0
- {tree_sitter_analyzer-0.1.2.dist-info → tree_sitter_analyzer-0.1.3.dist-info}/entry_points.txt +0 -0
tree_sitter_analyzer/__init__.py
CHANGED

tree_sitter_analyzer/cli/commands/advanced_command.py
CHANGED

@@ -37,7 +37,7 @@ class AdvancedCommand(BaseCommand):
             "node_count": analysis_result.node_count,
             "language": analysis_result.language,
         }
-        output_section("
+        output_section("Statistics")
         if self.args.output_format == "json":
             output_json(stats)
         else:
@@ -46,7 +46,7 @@ class AdvancedCommand(BaseCommand):

     def _output_full_analysis(self, analysis_result: "AnalysisResult") -> None:
         """Output full analysis results."""
-        output_section("
+        output_section("Advanced Analysis Results")
         if self.args.output_format == "json":
             result_dict = {
                 "file_path": analysis_result.file_path,
@@ -72,17 +72,17 @@ class AdvancedCommand(BaseCommand):

     def _output_text_analysis(self, analysis_result: "AnalysisResult") -> None:
         """Output analysis in text format."""
-        output_data(f"
-        output_data(f"
-        output_data(f"
+        output_data(f"File: {analysis_result.file_path}")
+        output_data(f"Package: (default)")
+        output_data(f"Lines: {analysis_result.line_count}")

         element_counts = {}
         for element in analysis_result.elements:
             element_type = getattr(element, "__class__", type(element)).__name__
             element_counts[element_type] = element_counts.get(element_type, 0) + 1

-        output_data(f"
-        output_data(f"
-        output_data(f"
-        output_data(f"
-        output_data(f"
+        output_data(f"Classes: {element_counts.get('Class', 0)}")
+        output_data(f"Methods: {element_counts.get('Function', 0)}")
+        output_data(f"Fields: {element_counts.get('Variable', 0)}")
+        output_data(f"Imports: {element_counts.get('Import', 0)}")
+        output_data(f"Annotations: 0")
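The text output above is driven by a simple type-name tally over `analysis_result.elements`. A runnable sketch of that counting idiom, using hypothetical stand-in element classes rather than the analyzer's real element model:

```python
# Minimal sketch of the element-counting pattern used in _output_text_analysis.
# Class and Function below are hypothetical stand-ins, not the package's types.
from dataclasses import dataclass


@dataclass
class Class:
    name: str


@dataclass
class Function:
    name: str


elements = [Class("Sample"), Function("main"), Function("run")]

element_counts: dict[str, int] = {}
for element in elements:
    # Same idiom as the diff: tally each element by its type name.
    element_type = getattr(element, "__class__", type(element)).__name__
    element_counts[element_type] = element_counts.get(element_type, 0) + 1

print(f"Classes: {element_counts.get('Class', 0)}")    # -> Classes: 1
print(f"Methods: {element_counts.get('Function', 0)}")  # -> Methods: 2
```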
tree_sitter_analyzer/cli/commands/base_command.py
CHANGED

@@ -34,13 +34,13 @@ class BaseCommand(ABC):
     def validate_file(self) -> bool:
         """Validate input file exists and is accessible."""
         if not hasattr(self.args, "file_path") or not self.args.file_path:
-            output_error("ERROR:
+            output_error("ERROR: File path not specified.")
             return False

         import os

         if not os.path.exists(self.args.file_path):
-            output_error(f"ERROR:
+            output_error(f"ERROR: File not found: {self.args.file_path}")
             return False

         return True
@@ -50,18 +50,18 @@ class BaseCommand(ABC):
         if hasattr(self.args, "language") and self.args.language:
             target_language = self.args.language.lower()
             if (not hasattr(self.args, "table") or not self.args.table) and (not hasattr(self.args, "quiet") or not self.args.quiet):
-                output_info(f"INFO:
+                output_info(f"INFO: Language explicitly specified: {target_language}")
         else:
             target_language = detect_language_from_file(self.args.file_path)
             if target_language == "unknown":
                 output_error(
-                    f"ERROR:
+                    f"ERROR: Could not determine language for file '{self.args.file_path}'."
                 )
                 return None
             else:
                 if (not hasattr(self.args, "table") or not self.args.table) and (not hasattr(self.args, "quiet") or not self.args.quiet):
                     output_info(
-                        f"INFO:
+                        f"INFO: Language auto-detected from extension: {target_language}"
                     )

         # Language support validation
@@ -69,7 +69,7 @@ class BaseCommand(ABC):
         if target_language != "java":
             if (not hasattr(self.args, "table") or not self.args.table) and (not hasattr(self.args, "quiet") or not self.args.quiet):
                 output_info(
-                    "INFO: Java
+                    "INFO: Trying with Java analysis engine. May not work correctly."
                 )
             target_language = "java"  # Fallback

@@ -89,10 +89,10 @@ class BaseCommand(ABC):
                     end_column=getattr(self.args, 'end_column', None)
                 )
                 if partial_content is None:
-                    output_error("ERROR:
+                    output_error("ERROR: Failed to read file partially")
                     return None
             except Exception as e:
-                output_error(f"ERROR:
+                output_error(f"ERROR: Failed to read file partially: {e}")
                 return None

             request = AnalysisRequest(
@@ -109,13 +109,13 @@ class BaseCommand(ABC):
                     if analysis_result
                     else "Unknown error"
                 )
-                output_error(f"ERROR:
+                output_error(f"ERROR: Analysis failed: {error_msg}")
                 return None

            return analysis_result

        except Exception as e:
-            output_error(f"ERROR:
+            output_error(f"ERROR: An error occurred during analysis: {e}")
            return None

    def execute(self) -> int:
@@ -138,7 +138,7 @@ class BaseCommand(ABC):
        try:
            return asyncio.run(self.execute_async(language))
        except Exception as e:
-            output_error(f"ERROR:
+            output_error(f"ERROR: An error occurred during command execution: {e}")
            return 1

    @abstractmethod
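The `--table`/`--quiet` guard around every INFO message is repeated verbatim in several hunks above. A hedged sketch of how that check could be expressed as one helper; the `should_print_info` name is hypothetical and not part of the package:

```python
import argparse


def should_print_info(args: argparse.Namespace) -> bool:
    """Hypothetical helper mirroring the repeated guard in base_command.py:
    INFO output is suppressed when --table or --quiet is set (or truthy)."""
    table = getattr(args, "table", None)
    quiet = getattr(args, "quiet", None)
    return not table and not quiet


args = argparse.Namespace(table=None, quiet=True)
print(should_print_info(args))  # False: --quiet suppresses INFO output
```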
tree_sitter_analyzer/cli/commands/partial_read_command.py
CHANGED

@@ -28,14 +28,14 @@ class PartialReadCommand(BaseCommand):
         """Validate input file exists and is accessible."""
         if not hasattr(self.args, "file_path") or not self.args.file_path:
             from ...output_manager import output_error
-            output_error("ERROR:
+            output_error("ERROR: File path not specified.")
             return False

         import os

         if not os.path.exists(self.args.file_path):
             from ...output_manager import output_error
-            output_error(f"ERROR:
+            output_error(f"ERROR: File not found: {self.args.file_path}")
             return False

         return True
@@ -54,17 +54,17 @@ class PartialReadCommand(BaseCommand):
         # Validate partial read arguments
         if not self.args.start_line:
             from ...output_manager import output_error
-            output_error("ERROR: --start-line
+            output_error("ERROR: --start-line is required")
             return 1

         if self.args.start_line < 1:
             from ...output_manager import output_error
-            output_error("ERROR: --start-line
+            output_error("ERROR: --start-line must be 1 or greater")
             return 1

         if self.args.end_line and self.args.end_line < self.args.start_line:
             from ...output_manager import output_error
-            output_error("ERROR: --end-line
+            output_error("ERROR: --end-line must be greater than or equal to --start-line")
             return 1

         # Read partial content
@@ -79,7 +79,7 @@ class PartialReadCommand(BaseCommand):

         if partial_content is None:
             from ...output_manager import output_error
-            output_error("ERROR:
+            output_error("ERROR: Failed to read file partially")
             return 1

         # Output the result
@@ -88,7 +88,7 @@ class PartialReadCommand(BaseCommand):

         except Exception as e:
             from ...output_manager import output_error
-            output_error(f"ERROR:
+            output_error(f"ERROR: Failed to read file partially: {e}")
             return 1

     def _output_partial_content(self, content: str) -> None:
@@ -107,7 +107,7 @@ class PartialReadCommand(BaseCommand):
         }

         # Build range info for header
-        range_info = f"
+        range_info = f"Line {self.args.start_line}"
         if hasattr(self.args, 'end_line') and self.args.end_line:
             range_info += f"-{self.args.end_line}"

@@ -119,10 +119,10 @@ class PartialReadCommand(BaseCommand):
             output_json(result_data)
         else:
             # Human-readable format with header
-            output_section("
-            output_data(f"
-            output_data(f"
-            output_data(f"
+            output_section("Partial Read Result")
+            output_data(f"File: {self.args.file_path}")
+            output_data(f"Range: {range_info}")
+            output_data(f"Characters read: {len(content)}")
             output_data("")  # Empty line for separation

             # Output the actual content
tree_sitter_analyzer/cli/commands/structure_command.py
CHANGED

@@ -27,7 +27,7 @@ class StructureCommand(BaseCommand):

     def _output_structure_analysis(self, analysis_result: "AnalysisResult") -> None:
         """Output structure analysis results with appropriate Japanese header."""
-        output_section("
+        output_section("Structure Analysis Results")

         # Convert to legacy structure format expected by tests
         structure_dict = self._convert_to_legacy_format(analysis_result)
@@ -91,31 +91,31 @@ class StructureCommand(BaseCommand):

     def _output_text_format(self, structure_dict: dict) -> None:
         """Output structure analysis in human-readable text format."""
-        output_data(f"
-        output_data(f"
+        output_data(f"File: {structure_dict['file_path']}")
+        output_data(f"Language: {structure_dict['language']}")

         if structure_dict['package']:
-            output_data(f"
+            output_data(f"Package: {structure_dict['package']['name']}")

         stats = structure_dict['statistics']
-        output_data(f"
-        output_data(f"
-        output_data(f"
-        output_data(f"
-        output_data(f"
-        output_data(f"
+        output_data(f"Statistics:")
+        output_data(f"  Classes: {stats['class_count']}")
+        output_data(f"  Methods: {stats['method_count']}")
+        output_data(f"  Fields: {stats['field_count']}")
+        output_data(f"  Imports: {stats['import_count']}")
+        output_data(f"  Total lines: {stats['total_lines']}")

         if structure_dict['classes']:
-            output_data("
+            output_data("Classes:")
             for cls in structure_dict['classes']:
                 output_data(f"  - {cls['name']}")

         if structure_dict['methods']:
-            output_data("
+            output_data("Methods:")
             for method in structure_dict['methods']:
                 output_data(f"  - {method['name']}")

         if structure_dict['fields']:
-            output_data("
+            output_data("Fields:")
             for field in structure_dict['fields']:
                 output_data(f"  - {field['name']}")
tree_sitter_analyzer/cli/commands/summary_command.py
CHANGED

@@ -27,7 +27,7 @@ class SummaryCommand(BaseCommand):

     def _output_summary_analysis(self, analysis_result: "AnalysisResult") -> None:
         """Output summary analysis results."""
-        output_section("
+        output_section("Summary Results")

         # Get summary types from args (default: classes,methods)
         summary_types = getattr(self.args, 'summary', 'classes,methods')
@@ -75,19 +75,19 @@ class SummaryCommand(BaseCommand):

     def _output_text_format(self, summary_data: dict, requested_types: list) -> None:
         """Output summary in human-readable text format."""
-        output_data(f"
-        output_data(f"
+        output_data(f"File: {summary_data['file_path']}")
+        output_data(f"Language: {summary_data['language']}")

         for element_type in requested_types:
             if element_type in summary_data['summary']:
                 elements = summary_data['summary'][element_type]
                 type_name_map = {
-                    'classes': '
-                    'methods': '
-                    'fields': '
-                    'imports': '
+                    'classes': 'Classes',
+                    'methods': 'Methods',
+                    'fields': 'Fields',
+                    'imports': 'Imports'
                 }
                 type_name = type_name_map.get(element_type, element_type)
-                output_data(f"\n{type_name} ({len(elements)}
+                output_data(f"\n{type_name} ({len(elements)} items):")
                 for element in elements:
                     output_data(f"  - {element['name']}")
tree_sitter_analyzer/cli_main.py
CHANGED

@@ -183,23 +183,23 @@ def handle_special_commands(args: argparse.Namespace) -> Optional[int]:
    # Validate partial read options
    if hasattr(args, 'partial_read') and args.partial_read:
        if args.start_line is None:
-            output_error("ERROR: --start-line
+            output_error("ERROR: --start-line is required")
            return 1

        if args.start_line < 1:
-            output_error("ERROR: --start-line
+            output_error("ERROR: --start-line must be 1 or greater")
            return 1

        if args.end_line and args.end_line < args.start_line:
-            output_error("ERROR: --end-line
+            output_error("ERROR: --end-line must be greater than or equal to --start-line")
            return 1

        if args.start_column is not None and args.start_column < 0:
-            output_error("ERROR: --start-column
+            output_error("ERROR: --start-column must be 0 or greater")
            return 1

        if args.end_column is not None and args.end_column < 0:
-            output_error("ERROR: --end-column
+            output_error("ERROR: --end-column must be 0 or greater")
            return 1

    # Query language commands
@@ -258,7 +258,7 @@ def main() -> None:
        sys.exit(exit_code)
    else:
        if not args.file_path:
-            output_error("ERROR:
+            output_error("ERROR: File path not specified.")
        else:
            output_error("ERROR: 実行可能なコマンドが指定されていません。")
        parser.print_help()
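The new messages in `handle_special_commands` spell out the partial-read contract: lines are 1-based, columns are 0-based, and the end line may not precede the start line. A standalone sketch restating those checks; the `validate_partial_read_args` helper is hypothetical and written only to mirror the validation above:

```python
from typing import Optional


def validate_partial_read_args(
    start_line: Optional[int],
    end_line: Optional[int] = None,
    start_column: Optional[int] = None,
    end_column: Optional[int] = None,
) -> Optional[str]:
    """Return an error message if the partial-read arguments are invalid,
    mirroring the checks in handle_special_commands; return None if valid."""
    if start_line is None:
        return "ERROR: --start-line is required"
    if start_line < 1:
        return "ERROR: --start-line must be 1 or greater"
    if end_line and end_line < start_line:
        return "ERROR: --end-line must be greater than or equal to --start-line"
    if start_column is not None and start_column < 0:
        return "ERROR: --start-column must be 0 or greater"
    if end_column is not None and end_column < 0:
        return "ERROR: --end-column must be 0 or greater"
    return None


print(validate_partial_read_args(84, 86))  # None: a valid range
print(validate_partial_read_args(0))       # ERROR: --start-line must be 1 or greater
```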
tree_sitter_analyzer/mcp/tools/read_partial_tool.py
CHANGED

@@ -158,16 +158,16 @@ class ReadPartialTool:
            json_output = json.dumps(result_data, indent=2, ensure_ascii=False)

            # Build range info for header
-            range_info = f"
+            range_info = f"Line {start_line}"
            if end_line:
                range_info += f"-{end_line}"

            # Build CLI-compatible output with header and JSON (without log message)
            cli_output = (
-                f"---
-                f"
-                f"
-                f"
+                f"--- Partial Read Result ---\n"
+                f"File: {file_path}\n"
+                f"Range: {range_info}\n"
+                f"Characters read: {len(content)}\n"
                f"{json_output}"
            )

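The hunk above makes the MCP tool emit the same header block as the CLI, followed by the JSON payload. A simplified, runnable sketch of that assembly; the input values are invented examples, and only the header strings and overall shape are taken from the diff:

```python
import json

# Example inputs; the real tool derives these from the MCP request.
file_path = "examples/Sample.java"
start_line, end_line = 84, 86
content = "    public void innerMethod() {\n    }\n"

result_data = {
    "file_path": file_path,
    "range": {"start_line": start_line, "end_line": end_line},
    "content": content,
    "content_length": len(content),
}
json_output = json.dumps(result_data, indent=2, ensure_ascii=False)

# Header strings match the ones introduced in this diff.
range_info = f"Line {start_line}"
if end_line:
    range_info += f"-{end_line}"

cli_output = (
    f"--- Partial Read Result ---\n"
    f"File: {file_path}\n"
    f"Range: {range_info}\n"
    f"Characters read: {len(content)}\n"
    f"{json_output}"
)
print(cli_output)
```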
{tree_sitter_analyzer-0.1.2.dist-info → tree_sitter_analyzer-0.1.3.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tree-sitter-analyzer
-Version: 0.1.
+Version: 0.1.3
 Summary: Extensible multi-language code analyzer framework using Tree-sitter with dynamic plugin architecture
 Project-URL: Homepage, https://github.com/aimasteracc/tree-sitter-analyzer
 Project-URL: Documentation, https://github.com/aimasteracc/tree-sitter-analyzer#readme
@@ -213,7 +213,7 @@ uv run python -m tree_sitter_analyzer examples/Sample.java --table=full
 >2025-07-30 16:57:47,922 - tree_sitter_analyzer - INFO - Plugin registered for language: python
 >2025-07-30 16:57:47,922 - tree_sitter_analyzer - INFO - Successfully loaded 3 language plugins: java, javascript, python
 >2025-07-30 16:57:47,923 - tree_sitter_analyzer - INFO - UnifiedAnalysisEngine initialized
->INFO:
+>INFO: Language auto-detected from extension: java
 >2025-07-30 16:57:47,925 - tree_sitter_analyzer - INFO - Starting analysis for examples/Sample.java
 >2025-07-30 16:57:47,945 - tree_sitter_analyzer.core.parser - INFO - Parser initialized successfully
 >2025-07-30 16:57:47,951 - PERF - analyze_java: 0.0253s - Operation completed
@@ -221,15 +221,15 @@ uv run python -m tree_sitter_analyzer examples/Sample.java --table=full
 >2025-07-30 16:57:47,958 - PERF - unified_analysis: 0.0253s - Analyzed examples/Sample.java (java)
 >2025-07-30 16:57:47,958 - tree_sitter_analyzer.performance - INFO - unified_analysis: 0.0253s - Analyzed examples/Sample.java (java)
 >
->---
->"
->"
->"
->"
->"
->"
->"
->"
+>--- Advanced Analysis Results ---
+>"File: examples/Sample.java"
+>"Package: (default)"
+>"Lines: 178"
+>"Classes: 8"
+>"Methods: 24"
+>"Fields: 5"
+>"Imports: 2"
+>"Annotations: 0"
 >```

 **Partial Code Extraction (`--partial-read`):**
@@ -419,7 +419,7 @@ Add to your Claude Desktop config file:
 ```
 >```json
 >{
->  "partial_content_result": "---
+>  "partial_content_result": "--- Partial Read Result ---\nFile: examples/Sample.java\nRange: Line 84-86\nCharacters read: 117\n{\n \"file_path\": \"examples/Sample.java\",\n \"range\": {\n \"start_line\": 84,\n \"end_line\": 86,\n \"start_column\": null,\n \"end_column\": null\n },\n \"content\": \" public void innerMethod() {\\n System.out.println(\\\"Inner class method, value: \\\" + value);\\n }\\n\",\n \"content_length\": 117\n}"
 >}


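Since `partial_content_result` is a plain-text header followed by a JSON document, a client can split on the first `{` to get back to structured data. A hedged consumer-side sketch, not part of the package, using an abridged example payload:

```python
import json

# Abridged example in the format shown in the README snippet above.
partial_content_result = (
    "--- Partial Read Result ---\n"
    "File: examples/Sample.java\n"
    "Range: Line 84-86\n"
    "Characters read: 117\n"
    '{\n  "file_path": "examples/Sample.java",\n  "content_length": 117\n}'
)

# Everything before the first '{' is the human-readable header; the rest is
# the JSON document. Assumes the header itself contains no '{' character.
json_start = partial_content_result.index("{")
header = partial_content_result[:json_start]
payload = partial_content_result[json_start:]

data = json.loads(payload)
print(header.strip().splitlines()[0])             # --- Partial Read Result ---
print(data["file_path"], data["content_length"])  # examples/Sample.java 117
```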
{tree_sitter_analyzer-0.1.2.dist-info → tree_sitter_analyzer-0.1.3.dist-info}/RECORD
CHANGED

@@ -1,7 +1,7 @@
-tree_sitter_analyzer/__init__.py,sha256=
+tree_sitter_analyzer/__init__.py,sha256=XEH63rciKeejwuvPKCWf332w3HSH_ZtqkX4QVzW5QPA,3003
 tree_sitter_analyzer/__main__.py,sha256=25E8WFUzTFAYqwT5Pdlq6cn8Aa25ogHP3w_5ybbHbNc,317
 tree_sitter_analyzer/api.py,sha256=-Np6khuxbzBA_T2FN-Z8WDEXz7260OcwKIMP3iz2Kaw,17870
-tree_sitter_analyzer/cli_main.py,sha256=
+tree_sitter_analyzer/cli_main.py,sha256=WTP77Shnaa9yTKy0bMVLUj2i3xtLbIaDDOlUYbS2cGc,9656
 tree_sitter_analyzer/encoding_utils.py,sha256=bgzyec3Rm1oxyRpTmXS_B5GOcA6rU54YCPRxv-2Z3q0,15017
 tree_sitter_analyzer/exceptions.py,sha256=s--JRYJXH8Ui7ChtWuMS4npM4V0-L3aW831xQO5v6RM,10219
 tree_sitter_analyzer/file_handler.py,sha256=P2S0AOCeo1mzVj_gDrntCSWGt9E3cflSfaxsPRx2EPM,7620
@@ -17,13 +17,13 @@ tree_sitter_analyzer/cli/__init__.py,sha256=zCYwQW0hKFfZ4w-qoSOnqVKZGtdZ-ziH60Ax
 tree_sitter_analyzer/cli/__main__.py,sha256=Sa02Ye57FhkDVTlGrb6U3m9V6II_TIuyzoQIwZtBkZ0,254
 tree_sitter_analyzer/cli/info_commands.py,sha256=B9fBryAJ2Ctt-wo8Tko86BKOfFCCBPhAWz9vz_3r1fs,4521
 tree_sitter_analyzer/cli/commands/__init__.py,sha256=duw37uHXggCRf08vaGQeY4dy7krZrc7YpXBNmlzhSQw,722
-tree_sitter_analyzer/cli/commands/advanced_command.py,sha256=
-tree_sitter_analyzer/cli/commands/base_command.py,sha256=
+tree_sitter_analyzer/cli/commands/advanced_command.py,sha256=aFxkvksJr7FtzKNJebS4SqCySjJQunUnp_7SVzeU29E,3521
+tree_sitter_analyzer/cli/commands/base_command.py,sha256=GrKQg_bTZuvuaS_fdzxGbhOE6JC1bMddrVMBz-qe6yo,5845
 tree_sitter_analyzer/cli/commands/default_command.py,sha256=lRy3WnCYZe3G4wpz4ZXyDc_wHByCHBhB0kdLdPJbKt0,601
-tree_sitter_analyzer/cli/commands/partial_read_command.py,sha256=
+tree_sitter_analyzer/cli/commands/partial_read_command.py,sha256=OnQVk5inXeF07ClsOrMvyw80aP__MbWJ0f1ZvPaAV7M,4807
 tree_sitter_analyzer/cli/commands/query_command.py,sha256=Z3QFVD8pYauW7oUHr-X6zZqUM_GYfBXSNYdSqyEjHP8,3294
-tree_sitter_analyzer/cli/commands/structure_command.py,sha256=
-tree_sitter_analyzer/cli/commands/summary_command.py,sha256=
+tree_sitter_analyzer/cli/commands/structure_command.py,sha256=rwG87-EIjiqRfaoKGhH3mNBtr7DL9E0qF311oKHvcvU,5171
+tree_sitter_analyzer/cli/commands/summary_command.py,sha256=la0YEEYibmspHvIrvUSiNS45grEkHgaIC-XH1N5dIc8,3690
 tree_sitter_analyzer/cli/commands/table_command.py,sha256=vehgW7nEIF5x_ghUHytdQX5pl7_uKO1Qwmwj4OjFFx0,9604
 tree_sitter_analyzer/core/__init__.py,sha256=raWtpBZJFxo_G1q3WJxdzmcbQK0TqqDycm-GLrjv4OA,479
 tree_sitter_analyzer/core/analysis_engine.py,sha256=5xwGRd-Blkyp3M4CIkFHbD6jULm3bbnv2Hn15fRyk-8,21256
@@ -53,7 +53,7 @@ tree_sitter_analyzer/mcp/tools/__init__.py,sha256=MBX4-SX8gfUyEGs_BlsOD4toRosvw-
 tree_sitter_analyzer/mcp/tools/analyze_scale_tool.py,sha256=Bya1dsy4a-aTzyYSI32NDUgMbIVCFWpYXMtloGHx1V0,23448
 tree_sitter_analyzer/mcp/tools/analyze_scale_tool_cli_compatible.py,sha256=xwXuy72FEfoY2TW6URddfdS9Ha_lq8_ZgG0UxC26mLM,8954
 tree_sitter_analyzer/mcp/tools/base_tool.py,sha256=LpY_QPWbpm8ZGe3SPK7TIBFZMiwkUMpHa8zcswld2ag,1295
-tree_sitter_analyzer/mcp/tools/read_partial_tool.py,sha256=
+tree_sitter_analyzer/mcp/tools/read_partial_tool.py,sha256=m71g5RjNkBf-DxXhHGYsRsID5fYfDFs53-2bGZ_C4sQ,11097
 tree_sitter_analyzer/mcp/tools/table_format_tool.py,sha256=IUF7N5Je_rV88RkEVz5wet37PZLaDdzXoAC5j1xI3EI,14428
 tree_sitter_analyzer/mcp/tools/universal_analyze_tool.py,sha256=asH_2BLzT-ACLLampyHF61WFn5nSVKL94wE0mU-uui8,19635
 tree_sitter_analyzer/mcp/utils/__init__.py,sha256=f1WkdJ3XNWgh2NDN0LGNdwk5O-ChqEvnOQ-mINlHPG8,3064
@@ -71,7 +71,7 @@ tree_sitter_analyzer/queries/java.py,sha256=hmaj7jKQ_m9nmOAnyiWQhzH-6g41xIi3fwt5
 tree_sitter_analyzer/queries/javascript.py,sha256=TSe6uSHhBuQU0r2B8YBqpEYkU4q9CYRuTUqRK0WfM5o,4183
 tree_sitter_analyzer/queries/python.py,sha256=V5MsKmI9A_FqAT2PKkrSL_Xp9bGKBUSpyVPoBmLxxWg,8018
 tree_sitter_analyzer/queries/typescript.py,sha256=T8a9PwqqGkwLr8clVsAfu0IUIrLKH8u4sBqlU1Cz-FE,7138
-tree_sitter_analyzer-0.1.
-tree_sitter_analyzer-0.1.
-tree_sitter_analyzer-0.1.
-tree_sitter_analyzer-0.1.
+tree_sitter_analyzer-0.1.3.dist-info/METADATA,sha256=E-0pK1kOkkAOPLmmHWHcTXUgUzje5VHUTZU6FYw-G6A,18344
+tree_sitter_analyzer-0.1.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+tree_sitter_analyzer-0.1.3.dist-info/entry_points.txt,sha256=-XEh1racqnCT30mhKWMv5-bgX0iqd_J6b08lZS9J4eg,336
+tree_sitter_analyzer-0.1.3.dist-info/RECORD,,

{tree_sitter_analyzer-0.1.2.dist-info → tree_sitter_analyzer-0.1.3.dist-info}/WHEEL
RENAMED
File without changes

{tree_sitter_analyzer-0.1.2.dist-info → tree_sitter_analyzer-0.1.3.dist-info}/entry_points.txt
RENAMED
File without changes