codebrain 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codebrain/__init__.py +3 -0
- codebrain/__main__.py +6 -0
- codebrain/agent_bridge.py +162 -0
- codebrain/analyzer.py +943 -0
- codebrain/api.py +578 -0
- codebrain/api_models.py +102 -0
- codebrain/cli.py +1927 -0
- codebrain/comprehension.py +1939 -0
- codebrain/config.py +46 -0
- codebrain/context.py +276 -0
- codebrain/export.py +334 -0
- codebrain/graph/__init__.py +0 -0
- codebrain/graph/query.py +656 -0
- codebrain/graph/schema.py +113 -0
- codebrain/graph/store.py +295 -0
- codebrain/hook_runner.py +71 -0
- codebrain/hooks.py +107 -0
- codebrain/indexer.py +450 -0
- codebrain/llm.py +676 -0
- codebrain/logging.py +42 -0
- codebrain/mcp_server.py +1635 -0
- codebrain/memory/__init__.py +5 -0
- codebrain/memory/store.py +270 -0
- codebrain/parser/__init__.py +0 -0
- codebrain/parser/base.py +27 -0
- codebrain/parser/config_parser.py +228 -0
- codebrain/parser/models.py +44 -0
- codebrain/parser/python_parser.py +658 -0
- codebrain/parser/registry.py +144 -0
- codebrain/parser/typescript_parser.py +1189 -0
- codebrain/parser/typescript_treesitter.py +535 -0
- codebrain/py.typed +0 -0
- codebrain/resolver.py +171 -0
- codebrain/settings.py +88 -0
- codebrain/utils.py +59 -0
- codebrain/validator.py +563 -0
- codebrain/watcher/__init__.py +0 -0
- codebrain/watcher/file_watcher.py +173 -0
- codebrain-0.1.0.dist-info/METADATA +360 -0
- codebrain-0.1.0.dist-info/RECORD +44 -0
- codebrain-0.1.0.dist-info/WHEEL +5 -0
- codebrain-0.1.0.dist-info/entry_points.txt +6 -0
- codebrain-0.1.0.dist-info/licenses/LICENSE +21 -0
- codebrain-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1189 @@
|
|
|
1
|
+
"""TypeScript/JavaScript structural parser.
|
|
2
|
+
|
|
3
|
+
Enhanced regex-based extraction for .ts, .tsx, .js, and .jsx files.
|
|
4
|
+
Captures: functions, classes, interfaces, types, enums, imports, exports,
|
|
5
|
+
re-exports, decorators, namespaces, React components, hooks, API calls,
|
|
6
|
+
navigation, and module-level variables.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import re
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
|
|
14
|
+
from codebrain.parser.base import BaseParser
|
|
15
|
+
from codebrain.parser.models import ParsedEdge, ParsedFile, ParsedNode
|
|
16
|
+
from codebrain.utils import content_hash
|
|
17
|
+
|
|
18
|
+
# ---------------------------------------------------------------------------
# Regex patterns
# ---------------------------------------------------------------------------
# NOTE(review): these are heuristic, line-oriented patterns. The parser does
# not tokenize the source, so matches inside string literals or comments are
# possible — acceptable for structural extraction, not for exact analysis.

# Imports
# Default import: import Name from "module"  -> (name, module)
_IMPORT_DEFAULT = re.compile(
    r"""import\s+(\w+)\s+from\s+['"]([^'"]+)['"]""",
)
# Named imports: import { A, B as C } from "module"  -> (names, module)
_IMPORT_NAMED = re.compile(
    r"""import\s+\{([^}]+)\}\s+from\s+['"]([^'"]+)['"]""",
)
# Namespace import: import * as ns from "module"  -> (alias, module)
_IMPORT_STAR = re.compile(
    r"""import\s+\*\s+as\s+(\w+)\s+from\s+['"]([^'"]+)['"]""",
)
# CommonJS: const X = require("m") or const { A, B } = require("m")
# -> (single name, destructured names, module)
_REQUIRE = re.compile(
    r"""(?:const|let|var)\s+(?:(\w+)|\{([^}]+)\})\s*=\s*require\s*\(\s*['"]([^'"]+)['"]\s*\)""",
)

# Type imports: import type { X } from "y"
_IMPORT_TYPE = re.compile(
    r"""import\s+type\s+\{([^}]+)\}\s+from\s+['"]([^'"]+)['"]""",
)

# Re-exports: export { X, Y } from "./module", export * from "./module"
_REEXPORT_NAMED = re.compile(
    r"""^export\s+(?:type\s+)?\{([^}]+)\}\s+from\s+['"]([^'"]+)['"]""",
    re.MULTILINE,
)
_REEXPORT_STAR = re.compile(
    r"""^export\s+\*\s+(?:as\s+(\w+)\s+)?from\s+['"]([^'"]+)['"]""",
    re.MULTILINE,
)

# Decorators: @DecoratorName or @decorator(args)
# Group 1 is the leading indent, group 2 the dotted decorator name.
_DECORATOR = re.compile(
    r"""^(\s*)@(\w[\w.]*)\s*(?:\([^)]*\))?""",
    re.MULTILINE,
)

# Functions — generics use a permissive match that allows nested <> via
# [^(]* up to the opening paren.
# Groups: (name, generics, params, return type annotation)
_FUNCTION_DECL = re.compile(
    r"""^(?:export\s+)?(?:default\s+)?(?:async\s+)?function\s*\*?\s+(\w+)\s*(<[^(]*>)?\s*\(([^)]*)\)(?:\s*:\s*([^\n{]+))?\s*\{""",
    re.MULTILINE,
)
# Arrow functions bound to const/let/var, optionally typed/async/generic;
# the paren alternative tolerates one level of nested parens in the params.
_ARROW_CONST = re.compile(
    r"""^\s*(?:export\s+)?(?:const|let|var)\s+(\w+)\s*(?::\s*[^=]+?)?\s*=\s*(?:async\s+)?(?:<[^>]*>\s*)?(?:\((?:[^)(]*|\([^)]*\))*\)|(\w+))\s*(?::\s*[^=]+?)?\s*=>""",
    re.MULTILINE,
)

# Classes — groups: (name, extends base, implements list)
_CLASS_DECL = re.compile(
    r"""^(?:export\s+)?(?:default\s+)?(?:abstract\s+)?class\s+(\w+)(?:\s+extends\s+(\w[\w.]*))?(?:\s+implements\s+([\w.,\s]+))?\s*\{""",
    re.MULTILINE,
)

# Methods inside classes (must be indented; any combination of modifiers).
# Trailing [{;] also matches abstract/interface-style signatures ending in ';'.
_METHOD_DECL = re.compile(
    r"""^\s+(?:(?:public|private|protected|static|readonly|abstract|async|override|get|set)\s+)*(\w+)\s*(<[^>]*>)?\s*\(([^)]*)\)(?:\s*:\s*([^\n{;]+))?\s*[{;]""",
    re.MULTILINE,
)

# Interfaces — groups: (name, extends list)
_INTERFACE_DECL = re.compile(
    r"""^(?:export\s+)?interface\s+(\w+)(?:\s+extends\s+([\w.,\s]+))?\s*\{""",
    re.MULTILINE,
)

# Type aliases: type Name<...> = ...
_TYPE_ALIAS = re.compile(
    r"""^(?:export\s+)?type\s+(\w+)(?:<[^>]*>)?\s*=""",
    re.MULTILINE,
)

# Enum (plain or const enum)
_ENUM_DECL = re.compile(
    r"""^(?:export\s+)?(?:const\s+)?enum\s+(\w+)\s*\{""",
    re.MULTILINE,
)

# Namespace / module declarations
_NAMESPACE_DECL = re.compile(
    r"""^(?:export\s+)?(?:declare\s+)?(?:namespace|module)\s+(\w+)\s*\{""",
    re.MULTILINE,
)

# Exports
_EXPORT_DEFAULT = re.compile(
    r"""^export\s+default\s+(?:class|function|abstract\s+class)\s+(\w+)""",
    re.MULTILINE,
)
_EXPORT_NAMED = re.compile(
    r"""^export\s+(?:const|let|var|function|class|interface|type|enum|abstract\s+class|async\s+function)""",
    re.MULTILINE,
)

# Object property methods: { methodName() { ... } } or { methodName: function() { ... } }
_OBJ_METHOD = re.compile(
    r"""^\s+(\w+)\s*\(([^)]*)\)\s*\{""",
    re.MULTILINE,
)
_OBJ_METHOD_ARROW = re.compile(
    r"""^\s+(\w+)\s*:\s*(?:async\s+)?(?:\([^)]*\)|(\w+))\s*=>""",
    re.MULTILINE,
)

# Nested functions inside function bodies (indented declarations)
_NESTED_FUNCTION = re.compile(
    r"""(?:^|\n)\s+(?:async\s+)?function\s+(\w+)\s*\(([^)]*)\)\s*\{""",
)
_NESTED_ARROW = re.compile(
    r"""\s+(?:const|let|var)\s+(\w+)\s*=\s*(?:async\s+)?(?:\([^)]*\)|(\w+))\s*=>""",
)

# Function calls (simple heuristic) — captures dotted callees like "obj.fn".
_CALL_EXPR = re.compile(
    r"""(?<!\w)(\w[\w.]*)\s*\(""",
)

# Module-level const/let/var — groups: (name, type annotation)
_VARIABLE_DECL = re.compile(
    r"""^\s*(?:export\s+)?(?:const|let|var)\s+(\w+)\s*(?::\s*([^=\n]+))?\s*=""",
    re.MULTILINE,
)

# React component wrappers: React.memo, memo, forwardRef, React.forwardRef, connect, styled
_REACT_WRAPPER = re.compile(
    r"""^(?:export\s+)?(?:const|let|var)\s+(\w+)\s*(?::\s*[^=]+?)?\s*=\s*(?:React\.)?(?:memo|forwardRef|lazy)\s*\(""",
    re.MULTILINE,
)
_REACT_HOC = re.compile(
    r"""^(?:export\s+)?(?:const|let|var)\s+(\w+)\s*=\s*(?:connect|withRouter|withNavigation|withTheme|styled|observer)\s*\(""",
    re.MULTILINE,
)

# React hooks: useState, useEffect, useCallback, useMemo, useRef, custom hooks
# (anything matching the use[A-Z]... naming convention).
_HOOK_CALL = re.compile(
    r"""(?<!\w)(use[A-Z]\w*)\s*\(""",
)

# JSX component references: <ComponentName or <Component.Sub
# (uppercase initial letter distinguishes components from HTML tags).
_JSX_COMPONENT = re.compile(
    r"""<([A-Z]\w*(?:\.\w+)?)\s""",
)

# API calls: fetch, axios, api.get/post/put/delete — captures the literal URL.
_API_CALL = re.compile(
    r"""(?<!\w)(?:fetch|axios(?:\.(?:get|post|put|delete|patch))?|api\.(?:get|post|put|delete|patch))\s*\(\s*[`'"]([^`'"]+)[`'"]""",
)

# Navigation: createStackNavigator, createBottomTabNavigator, Screen name=
_NAV_SCREEN = re.compile(
    r"""<\w+\.Screen\s+[^>]*name\s*=\s*['"]\s*(\w+)\s*['"]""",
)
_NAV_NAVIGATOR = re.compile(
    r"""(?:const|let|var)\s+(\w+)\s*=\s*create\w*Navigator\s*\(""",
)

# Keywords to skip in call extraction. Dotted entries work because
# _CALL_EXPR captures full dotted callee names.
_CALL_SKIP = frozenset({
    "if", "for", "while", "switch", "catch", "return", "new", "typeof",
    "instanceof", "import", "require", "from", "throw", "delete", "void",
    "yield", "await", "case", "else", "try", "finally", "class", "function",
    "const", "let", "var", "export", "default", "extends", "implements",
    "true", "false", "null", "undefined", "this", "super",
    # Common JS built-ins that add noise
    "console", "console.log", "console.warn", "console.error",
    "JSON.stringify", "JSON.parse", "Object.keys", "Object.values",
    "Object.assign", "Object.entries", "Array.isArray", "Array.from",
    "Math.floor", "Math.ceil", "Math.round", "Math.max", "Math.min",
    "parseInt", "parseFloat", "String", "Number", "Boolean",
    "setTimeout", "setInterval", "clearTimeout", "clearInterval",
    "Promise.resolve", "Promise.reject", "Promise.all",
})
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
def _find_line(source: str, pos: int) -> int:
|
|
194
|
+
"""Return 1-based line number for position pos in source."""
|
|
195
|
+
return source.count("\n", 0, pos) + 1
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
def _find_block_end(source: str, start: int) -> int:
    """Return the 1-based line number of the brace that closes the block.

    Scans forward from *start* to the first ``{`` and tracks brace depth
    until it returns to zero. Falls back to ``start + 1`` when no opening
    brace exists, and to the last line of *source* when the block never
    closes. NOTE(review): braces inside strings/comments are counted too —
    acceptable for this heuristic parser.
    """
    open_pos = source.find("{", start)
    if open_pos == -1:
        return start + 1
    depth = 0
    for idx in range(open_pos, len(source)):
        ch = source[idx]
        if ch == "{":
            depth += 1
        elif ch == "}":
            depth -= 1
            if depth == 0:
                return _find_line(source, idx)
    return _find_line(source, len(source) - 1)
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
def _find_block_end_pos(source: str, start: int) -> int | None:
|
|
216
|
+
"""Find the position of the matching closing brace."""
|
|
217
|
+
depth = 0
|
|
218
|
+
i = source.find("{", start)
|
|
219
|
+
if i == -1:
|
|
220
|
+
return None
|
|
221
|
+
while i < len(source):
|
|
222
|
+
if source[i] == "{":
|
|
223
|
+
depth += 1
|
|
224
|
+
elif source[i] == "}":
|
|
225
|
+
depth -= 1
|
|
226
|
+
if depth == 0:
|
|
227
|
+
return i
|
|
228
|
+
i += 1
|
|
229
|
+
return None
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def _match_angle_brackets(source: str, start: int) -> int:
|
|
233
|
+
"""Match nested angle brackets starting from ``<`` at *start*.
|
|
234
|
+
|
|
235
|
+
Returns the position **after** the closing ``>``, or *start* if the
|
|
236
|
+
character at *start* is not ``<``. Handles nesting like
|
|
237
|
+
``<T extends Array<U>>``.
|
|
238
|
+
"""
|
|
239
|
+
if start >= len(source) or source[start] != "<":
|
|
240
|
+
return start
|
|
241
|
+
depth = 0
|
|
242
|
+
i = start
|
|
243
|
+
while i < len(source):
|
|
244
|
+
ch = source[i]
|
|
245
|
+
if ch == "<":
|
|
246
|
+
depth += 1
|
|
247
|
+
elif ch == ">":
|
|
248
|
+
depth -= 1
|
|
249
|
+
if depth == 0:
|
|
250
|
+
return i + 1
|
|
251
|
+
# Bail on clearly non-generic content
|
|
252
|
+
elif ch in ("{", "}", ";"):
|
|
253
|
+
return start
|
|
254
|
+
i += 1
|
|
255
|
+
return start
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
def _find_arrow_expr_end(source: str, arrow_pos: int) -> int:
    """Find the end of a single-expression arrow function body.

    Starts scanning right after ``=>``. Handles parenthesized expressions,
    template literals, string literals, and nested function calls. Returns
    the position of the last character of the expression.

    For a block body (``=> { ... }``) this delegates to
    ``_find_block_end_pos``; otherwise it runs a small character-level
    scanner that tracks paren/bracket depth and string state, and stops at
    a ``;``, an unbalanced closer, or a newline that does not look like an
    expression continuation.
    """
    i = arrow_pos + 2  # skip =>
    # skip whitespace
    while i < len(source) and source[i] in (" ", "\t", "\n", "\r"):
        i += 1

    if i >= len(source):
        return len(source) - 1

    # If it's a block body, delegate to _find_block_end_pos
    if source[i] == "{":
        end = _find_block_end_pos(source, i)
        return end if end is not None else len(source) - 1

    # Otherwise, scan for the expression end
    paren_depth = 0
    bracket_depth = 0
    in_string = None  # None, '"', "'", '`'
    expr_start = i  # NOTE(review): retained from original; not read below
    while i < len(source):
        ch = source[i]

        # Handle string/template literals: while inside a string every
        # character is consumed verbatim (escapes skip two characters).
        if in_string:
            if ch == "\\" and i + 1 < len(source):
                i += 2  # skip escaped char
                continue
            if ch == in_string:
                in_string = None
            i += 1
            continue

        if ch in ('"', "'", "`"):
            in_string = ch
            i += 1
            continue

        if ch == "(":
            paren_depth += 1
        elif ch == ")":
            if paren_depth > 0:
                paren_depth -= 1
            else:
                # unbalanced paren — we're past the expression
                return i - 1
        elif ch == "[":
            bracket_depth += 1
        elif ch == "]":
            if bracket_depth > 0:
                bracket_depth -= 1
            else:
                # unbalanced bracket — likewise past the expression
                return i - 1

        # End conditions: semicolon, newline at depth 0, comma at depth 0
        if paren_depth == 0 and bracket_depth == 0:
            if ch == ";":
                return i
            if ch == "\n":
                # Check if next non-whitespace line starts a new statement
                j = i + 1
                while j < len(source) and source[j] in (" ", "\t"):
                    j += 1
                if j >= len(source) or source[j] == "\n":
                    return i
                # Continuation heuristics: operator, dot, ?, :, etc. — if the
                # next line begins with one of these, the expression continues.
                next_ch = source[j] if j < len(source) else ""
                if next_ch not in (".", "?", ":", "+", "-", "*", "/", "%",
                                   "&", "|", "^", "!", "=", "<", ">",
                                   ",", "(", "[", "`"):
                    return i

        i += 1

    return len(source) - 1
|
|
338
|
+
|
|
339
|
+
|
|
340
|
+
def _collect_decorators(source: str, decl_pos: int) -> list[str]:
|
|
341
|
+
"""Collect decorator names from lines preceding a declaration at *decl_pos*."""
|
|
342
|
+
decorators: list[str] = []
|
|
343
|
+
# Walk backwards from decl_pos to find decorator lines
|
|
344
|
+
line_start = source.rfind("\n", 0, decl_pos)
|
|
345
|
+
if line_start == -1:
|
|
346
|
+
return decorators
|
|
347
|
+
# Scan backwards through preceding lines
|
|
348
|
+
pos = line_start
|
|
349
|
+
while pos > 0:
|
|
350
|
+
prev_line_start = source.rfind("\n", 0, pos)
|
|
351
|
+
if prev_line_start == -1:
|
|
352
|
+
prev_line_start = 0
|
|
353
|
+
line = source[prev_line_start:pos].strip()
|
|
354
|
+
if not line:
|
|
355
|
+
pos = prev_line_start
|
|
356
|
+
continue
|
|
357
|
+
dm = re.match(r"@(\w[\w.]*)", line)
|
|
358
|
+
if dm:
|
|
359
|
+
decorators.append(dm.group(1))
|
|
360
|
+
pos = prev_line_start
|
|
361
|
+
else:
|
|
362
|
+
break
|
|
363
|
+
decorators.reverse()
|
|
364
|
+
return decorators
|
|
365
|
+
|
|
366
|
+
|
|
367
|
+
# Significant comment patterns (JS/TS uses // and /* */ comments).
# The opening alternation also matches a bare "*", so tags on block-comment
# continuation lines (" * TODO: ...") are picked up as well.
# Groups: (TAG, remainder-of-line text).
_SIGNIFICANT_COMMENT_JS = re.compile(
    r"""(?://|/?\*)\s*(TODO|FIXME|HACK|NOTE|WARNING|BUG|XXX|IMPORTANT|REFACTOR)\b[:\s]*(.+)""",
    re.IGNORECASE,
)
|
|
372
|
+
|
|
373
|
+
|
|
374
|
+
def _extract_significant_comments_js(source: str, max_comments: int = 20) -> list[str]:
    """Collect up to *max_comments* tagged comments (TODO/FIXME/...) from *source*.

    Each entry is formatted ``L<line> <TAG>: <text>``, with the text
    clipped to 100 characters and trailing ``*/`` residue stripped.
    """
    results: list[str] = []
    for match in _SIGNIFICANT_COMMENT_JS.finditer(source):
        tag = match.group(1).upper()
        body = match.group(2).strip().rstrip("*/").strip()[:100]
        lineno = source.count("\n", 0, match.start()) + 1
        results.append(f"L{lineno} {tag}: {body}")
        if len(results) >= max_comments:
            break
    return results
|
|
385
|
+
|
|
386
|
+
|
|
387
|
+
def _is_in_export(source: str, pos: int) -> bool:
|
|
388
|
+
"""Check if the statement at *pos* is an export."""
|
|
389
|
+
line_start = source.rfind("\n", 0, pos) + 1
|
|
390
|
+
line_end = source.find("\n", pos)
|
|
391
|
+
if line_end == -1:
|
|
392
|
+
line_end = len(source)
|
|
393
|
+
line = source[line_start:line_end]
|
|
394
|
+
return line.lstrip().startswith("export")
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
def _extract_calls(source_block: str, caller_id: str, file_path: str, base_line: int) -> list[ParsedEdge]:
    """Build one CALLS edge per distinct callee invoked in *source_block*.

    Names in ``_CALL_SKIP`` (keywords and noisy built-ins) are ignored;
    each remaining callee is recorded once, at the file line of its first
    occurrence (*base_line* anchors the block within the file).
    """
    recorded: set[str] = set()
    result: list[ParsedEdge] = []
    for match in _CALL_EXPR.finditer(source_block):
        callee = match.group(1)
        if callee in _CALL_SKIP or callee in recorded:
            continue
        recorded.add(callee)
        result.append(ParsedEdge(
            source=caller_id,
            target=callee,
            type="CALLS",
            file_path=file_path,
            line=base_line + _find_line(source_block, match.start()) - 1,
        ))
    return result
|
|
414
|
+
|
|
415
|
+
|
|
416
|
+
def _extract_hooks(source_block: str, caller_id: str, file_path: str, base_line: int) -> list[ParsedEdge]:
    """Build CALLS edges for React hook invocations (``useXxx(...)``).

    Each distinct hook name is recorded once, at the file line of its
    first occurrence within the block.
    """
    recorded: set[str] = set()
    result: list[ParsedEdge] = []
    for match in _HOOK_CALL.finditer(source_block):
        hook = match.group(1)
        if hook in recorded:
            continue
        recorded.add(hook)
        result.append(ParsedEdge(
            source=caller_id,
            target=hook,
            type="CALLS",
            file_path=file_path,
            line=base_line + _find_line(source_block, match.start()) - 1,
        ))
    return result
|
|
431
|
+
|
|
432
|
+
|
|
433
|
+
def _extract_jsx_refs(source_block: str, caller_id: str, file_path: str, base_line: int) -> list[ParsedEdge]:
    """Build CALLS edges for JSX component references (``<Component ...``).

    The _JSX_COMPONENT pattern only matches capitalized tags, so lowercase
    HTML elements are already excluded; capitalized React Native elements
    (View, Text, ...) are deliberately kept. Each component is recorded
    once, at the file line of its first occurrence.
    """
    recorded: set[str] = set()
    result: list[ParsedEdge] = []
    for match in _JSX_COMPONENT.finditer(source_block):
        component = match.group(1)
        if component in recorded:
            continue
        recorded.add(component)
        result.append(ParsedEdge(
            source=caller_id,
            target=component,
            type="CALLS",
            file_path=file_path,
            line=base_line + _find_line(source_block, match.start()) - 1,
        ))
    return result
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
def _extract_api_calls(source_block: str, caller_id: str, file_path: str, base_line: int) -> list[ParsedEdge]:
    """Build DATAFLOW edges for fetch/axios/api calls with literal URLs.

    Unlike the CALLS extractors above, occurrences are NOT deduplicated:
    repeated requests to the same endpoint each produce an edge, targeted
    at ``API:<url>``.
    """
    result: list[ParsedEdge] = []
    for match in _API_CALL.finditer(source_block):
        result.append(ParsedEdge(
            source=caller_id,
            target=f"API:{match.group(1)}",
            type="DATAFLOW",
            file_path=file_path,
            line=base_line + _find_line(source_block, match.start()) - 1,
        ))
    return result
|
|
462
|
+
|
|
463
|
+
|
|
464
|
+
def parse_typescript_file(path: Path, repo_root: Path) -> ParsedFile:
|
|
465
|
+
"""Parse a TypeScript/JavaScript file and return a ParsedFile."""
|
|
466
|
+
rel_path = path.relative_to(repo_root).as_posix()
|
|
467
|
+
raw = path.read_bytes()
|
|
468
|
+
hash_val = content_hash(raw)
|
|
469
|
+
|
|
470
|
+
try:
|
|
471
|
+
source = raw.decode("utf-8")
|
|
472
|
+
except UnicodeDecodeError:
|
|
473
|
+
source = raw.decode("utf-8", errors="replace")
|
|
474
|
+
|
|
475
|
+
line_count = source.count("\n") + 1
|
|
476
|
+
module_name = rel_path.replace("/", ".").removesuffix(".ts").removesuffix(".tsx").removesuffix(".js").removesuffix(".jsx")
|
|
477
|
+
|
|
478
|
+
nodes: list[ParsedNode] = []
|
|
479
|
+
edges: list[ParsedEdge] = []
|
|
480
|
+
exported_names: set[str] = set()
|
|
481
|
+
seen_node_ids: set[str] = set()
|
|
482
|
+
|
|
483
|
+
# File-level node — use __module__ suffix to avoid collision with
|
|
484
|
+
# same-named component exports (e.g. ProfileScreen.tsx exports ProfileScreen)
|
|
485
|
+
file_node_id = f"{rel_path}::__module__"
|
|
486
|
+
# Extract significant comments for LLM context
|
|
487
|
+
significant_comments = _extract_significant_comments_js(source)
|
|
488
|
+
file_docstring = ""
|
|
489
|
+
if significant_comments:
|
|
490
|
+
file_docstring = f"[Comments: {' | '.join(significant_comments)}]"
|
|
491
|
+
|
|
492
|
+
file_node = ParsedNode(
|
|
493
|
+
id=file_node_id,
|
|
494
|
+
name=module_name.rsplit(".", 1)[-1],
|
|
495
|
+
qualified_name=module_name,
|
|
496
|
+
type="file",
|
|
497
|
+
file_path=rel_path,
|
|
498
|
+
line_start=1,
|
|
499
|
+
line_end=line_count,
|
|
500
|
+
is_exported=True,
|
|
501
|
+
docstring=file_docstring,
|
|
502
|
+
)
|
|
503
|
+
nodes.append(file_node)
|
|
504
|
+
seen_node_ids.add(file_node_id)
|
|
505
|
+
|
|
506
|
+
# --- Imports ---
|
|
507
|
+
for m in _IMPORT_DEFAULT.finditer(source):
|
|
508
|
+
name, module = m.group(1), m.group(2)
|
|
509
|
+
edges.append(ParsedEdge(
|
|
510
|
+
source=file_node_id, target=module,
|
|
511
|
+
type="IMPORTS", file_path=rel_path, line=_find_line(source, m.start()),
|
|
512
|
+
))
|
|
513
|
+
|
|
514
|
+
for m in _IMPORT_NAMED.finditer(source):
|
|
515
|
+
names_str, module = m.group(1), m.group(2)
|
|
516
|
+
line = _find_line(source, m.start())
|
|
517
|
+
for name in names_str.split(","):
|
|
518
|
+
name = name.strip().split(" as ")[0].strip()
|
|
519
|
+
if name:
|
|
520
|
+
edges.append(ParsedEdge(
|
|
521
|
+
source=file_node_id, target=f"{module}.{name}",
|
|
522
|
+
type="IMPORTS", file_path=rel_path, line=line,
|
|
523
|
+
))
|
|
524
|
+
|
|
525
|
+
for m in _IMPORT_STAR.finditer(source):
|
|
526
|
+
alias, module = m.group(1), m.group(2)
|
|
527
|
+
edges.append(ParsedEdge(
|
|
528
|
+
source=file_node_id, target=module,
|
|
529
|
+
type="IMPORTS", file_path=rel_path, line=_find_line(source, m.start()),
|
|
530
|
+
))
|
|
531
|
+
|
|
532
|
+
for m in _REQUIRE.finditer(source):
|
|
533
|
+
module = m.group(3)
|
|
534
|
+
edges.append(ParsedEdge(
|
|
535
|
+
source=file_node_id, target=module,
|
|
536
|
+
type="IMPORTS", file_path=rel_path, line=_find_line(source, m.start()),
|
|
537
|
+
))
|
|
538
|
+
|
|
539
|
+
# --- Type imports ---
|
|
540
|
+
for m in _IMPORT_TYPE.finditer(source):
|
|
541
|
+
names_str, module = m.group(1), m.group(2)
|
|
542
|
+
line = _find_line(source, m.start())
|
|
543
|
+
for name in names_str.split(","):
|
|
544
|
+
name = name.strip().split(" as ")[0].strip()
|
|
545
|
+
if name:
|
|
546
|
+
edges.append(ParsedEdge(
|
|
547
|
+
source=file_node_id, target=f"{module}.{name}",
|
|
548
|
+
type="IMPORTS", file_path=rel_path, line=line,
|
|
549
|
+
))
|
|
550
|
+
|
|
551
|
+
# --- Re-exports ---
|
|
552
|
+
for m in _REEXPORT_NAMED.finditer(source):
|
|
553
|
+
names_str, module = m.group(1), m.group(2)
|
|
554
|
+
line = _find_line(source, m.start())
|
|
555
|
+
edges.append(ParsedEdge(
|
|
556
|
+
source=file_node_id, target=module,
|
|
557
|
+
type="IMPORTS", file_path=rel_path, line=line,
|
|
558
|
+
))
|
|
559
|
+
for name in names_str.split(","):
|
|
560
|
+
raw = name.strip()
|
|
561
|
+
# Handle "default as NewName" or "X as Y"
|
|
562
|
+
if " as " in raw:
|
|
563
|
+
parts = raw.split(" as ")
|
|
564
|
+
original = parts[0].strip()
|
|
565
|
+
alias = parts[1].strip()
|
|
566
|
+
exported_names.add(alias)
|
|
567
|
+
edges.append(ParsedEdge(
|
|
568
|
+
source=file_node_id, target=f"{module}.{original}",
|
|
569
|
+
type="IMPORTS", file_path=rel_path, line=line,
|
|
570
|
+
))
|
|
571
|
+
# Create a re-exported node
|
|
572
|
+
node_id = f"{rel_path}::{alias}"
|
|
573
|
+
if node_id not in seen_node_ids:
|
|
574
|
+
seen_node_ids.add(node_id)
|
|
575
|
+
nodes.append(ParsedNode(
|
|
576
|
+
id=node_id, name=alias, qualified_name=alias,
|
|
577
|
+
type="variable", file_path=rel_path,
|
|
578
|
+
line_start=line, line_end=line,
|
|
579
|
+
is_exported=True, decorators=["re-export"],
|
|
580
|
+
))
|
|
581
|
+
else:
|
|
582
|
+
clean = raw.strip()
|
|
583
|
+
if clean:
|
|
584
|
+
exported_names.add(clean)
|
|
585
|
+
edges.append(ParsedEdge(
|
|
586
|
+
source=file_node_id, target=f"{module}.{clean}",
|
|
587
|
+
type="IMPORTS", file_path=rel_path, line=line,
|
|
588
|
+
))
|
|
589
|
+
# Create a re-exported node
|
|
590
|
+
node_id = f"{rel_path}::{clean}"
|
|
591
|
+
if node_id not in seen_node_ids:
|
|
592
|
+
seen_node_ids.add(node_id)
|
|
593
|
+
nodes.append(ParsedNode(
|
|
594
|
+
id=node_id, name=clean, qualified_name=clean,
|
|
595
|
+
type="variable", file_path=rel_path,
|
|
596
|
+
line_start=line, line_end=line,
|
|
597
|
+
is_exported=True, decorators=["re-export"],
|
|
598
|
+
))
|
|
599
|
+
|
|
600
|
+
for m in _REEXPORT_STAR.finditer(source):
|
|
601
|
+
alias = m.group(1) # may be None
|
|
602
|
+
module = m.group(2)
|
|
603
|
+
line = _find_line(source, m.start())
|
|
604
|
+
edges.append(ParsedEdge(
|
|
605
|
+
source=file_node_id, target=module,
|
|
606
|
+
type="IMPORTS", file_path=rel_path, line=line,
|
|
607
|
+
))
|
|
608
|
+
if alias:
|
|
609
|
+
exported_names.add(alias)
|
|
610
|
+
node_id = f"{rel_path}::{alias}"
|
|
611
|
+
if node_id not in seen_node_ids:
|
|
612
|
+
seen_node_ids.add(node_id)
|
|
613
|
+
nodes.append(ParsedNode(
|
|
614
|
+
id=node_id, name=alias, qualified_name=alias,
|
|
615
|
+
type="variable", file_path=rel_path,
|
|
616
|
+
line_start=line, line_end=line,
|
|
617
|
+
is_exported=True, decorators=["re-export"],
|
|
618
|
+
))
|
|
619
|
+
|
|
620
|
+
# --- Functions (function keyword) ---
|
|
621
|
+
for m in _FUNCTION_DECL.finditer(source):
|
|
622
|
+
name = m.group(1)
|
|
623
|
+
params = m.group(3).strip()
|
|
624
|
+
return_type = m.group(4).strip() if m.group(4) else ""
|
|
625
|
+
line = _find_line(source, m.start())
|
|
626
|
+
is_exp = _is_in_export(source, m.start())
|
|
627
|
+
if is_exp:
|
|
628
|
+
exported_names.add(name)
|
|
629
|
+
sig = f"({params})"
|
|
630
|
+
if return_type:
|
|
631
|
+
sig += f" => {return_type}"
|
|
632
|
+
end_line = _find_block_end(source, m.start())
|
|
633
|
+
node_id = f"{rel_path}::{name}"
|
|
634
|
+
# Collect decorators from preceding lines
|
|
635
|
+
func_decorators = _collect_decorators(source, m.start())
|
|
636
|
+
if node_id not in seen_node_ids:
|
|
637
|
+
seen_node_ids.add(node_id)
|
|
638
|
+
nodes.append(ParsedNode(
|
|
639
|
+
id=node_id, name=name, qualified_name=name, type="function",
|
|
640
|
+
file_path=rel_path, line_start=line, line_end=end_line,
|
|
641
|
+
signature=sig, is_exported=is_exp,
|
|
642
|
+
decorators=func_decorators,
|
|
643
|
+
))
|
|
644
|
+
edges.append(ParsedEdge(
|
|
645
|
+
source=file_node_id, target=node_id,
|
|
646
|
+
type="CONTAINS", file_path=rel_path, line=line,
|
|
647
|
+
))
|
|
648
|
+
# Extract calls within this function
|
|
649
|
+
block_end = _find_block_end_pos(source, m.start())
|
|
650
|
+
func_body = source[m.start():block_end + 1] if block_end is not None else ""
|
|
651
|
+
edges.extend(_extract_calls(func_body, node_id, rel_path, line))
|
|
652
|
+
edges.extend(_extract_hooks(func_body, node_id, rel_path, line))
|
|
653
|
+
edges.extend(_extract_jsx_refs(func_body, node_id, rel_path, line))
|
|
654
|
+
edges.extend(_extract_api_calls(func_body, node_id, rel_path, line))
|
|
655
|
+
|
|
656
|
+
# Extract nested functions
|
|
657
|
+
if func_body:
|
|
658
|
+
for nm in _NESTED_FUNCTION.finditer(func_body):
|
|
659
|
+
nname = nm.group(1)
|
|
660
|
+
nline = line + _find_line(func_body, nm.start()) - 1
|
|
661
|
+
nid = f"{rel_path}::{name}.{nname}"
|
|
662
|
+
if nid not in seen_node_ids:
|
|
663
|
+
seen_node_ids.add(nid)
|
|
664
|
+
nend = line + _find_line(func_body[nm.start():], func_body[nm.start():].find("}") if "}" in func_body[nm.start():] else 0)
|
|
665
|
+
nb_end = _find_block_end_pos(func_body, nm.start())
|
|
666
|
+
if nb_end is not None:
|
|
667
|
+
nend = line + _find_line(func_body, nb_end) - 1
|
|
668
|
+
nodes.append(ParsedNode(
|
|
669
|
+
id=nid, name=nname,
|
|
670
|
+
qualified_name=f"{name}.{nname}", type="function",
|
|
671
|
+
file_path=rel_path, line_start=nline, line_end=nend,
|
|
672
|
+
))
|
|
673
|
+
edges.append(ParsedEdge(
|
|
674
|
+
source=node_id, target=nid,
|
|
675
|
+
type="CONTAINS", file_path=rel_path, line=nline,
|
|
676
|
+
))
|
|
677
|
+
for nm in _NESTED_ARROW.finditer(func_body):
|
|
678
|
+
nname = nm.group(1)
|
|
679
|
+
nline = line + _find_line(func_body, nm.start()) - 1
|
|
680
|
+
nid = f"{rel_path}::{name}.{nname}"
|
|
681
|
+
if nid not in seen_node_ids:
|
|
682
|
+
seen_node_ids.add(nid)
|
|
683
|
+
nodes.append(ParsedNode(
|
|
684
|
+
id=nid, name=nname,
|
|
685
|
+
qualified_name=f"{name}.{nname}", type="function",
|
|
686
|
+
file_path=rel_path, line_start=nline, line_end=nline,
|
|
687
|
+
))
|
|
688
|
+
edges.append(ParsedEdge(
|
|
689
|
+
source=node_id, target=nid,
|
|
690
|
+
type="CONTAINS", file_path=rel_path, line=nline,
|
|
691
|
+
))
|
|
692
|
+
|
|
693
|
+
# --- Arrow functions (const foo = (...) => ...) ---
|
|
694
|
+
for m in _ARROW_CONST.finditer(source):
|
|
695
|
+
name = m.group(1)
|
|
696
|
+
line = _find_line(source, m.start())
|
|
697
|
+
is_exp = _is_in_export(source, m.start())
|
|
698
|
+
if is_exp:
|
|
699
|
+
exported_names.add(name)
|
|
700
|
+
node_id = f"{rel_path}::{name}"
|
|
701
|
+
arrow_decorators = _collect_decorators(source, m.start())
|
|
702
|
+
if node_id not in seen_node_ids:
|
|
703
|
+
seen_node_ids.add(node_id)
|
|
704
|
+
nodes.append(ParsedNode(
|
|
705
|
+
id=node_id, name=name, qualified_name=name, type="function",
|
|
706
|
+
file_path=rel_path, line_start=line, line_end=line,
|
|
707
|
+
is_exported=is_exp, decorators=arrow_decorators,
|
|
708
|
+
))
|
|
709
|
+
edges.append(ParsedEdge(
|
|
710
|
+
source=file_node_id, target=node_id,
|
|
711
|
+
type="CONTAINS", file_path=rel_path, line=line,
|
|
712
|
+
))
|
|
713
|
+
# Find the arrow `=>` position, then look for `{` AFTER it
|
|
714
|
+
arrow_pos = source.find("=>", m.end() - 2)
|
|
715
|
+
if arrow_pos == -1:
|
|
716
|
+
arrow_pos = m.end()
|
|
717
|
+
block_end = _find_block_end_pos(source, arrow_pos)
|
|
718
|
+
if block_end is not None:
|
|
719
|
+
arrow_body = source[arrow_pos:block_end + 1]
|
|
720
|
+
# Update end_line for arrow functions with block bodies
|
|
721
|
+
for n in nodes:
|
|
722
|
+
if n.id == node_id:
|
|
723
|
+
n.line_end = _find_line(source, block_end)
|
|
724
|
+
break
|
|
725
|
+
else:
|
|
726
|
+
# Single-expression arrow: use smart expression-end detection
|
|
727
|
+
expr_end = _find_arrow_expr_end(source, arrow_pos)
|
|
728
|
+
arrow_body = source[arrow_pos:expr_end + 1]
|
|
729
|
+
for n in nodes:
|
|
730
|
+
if n.id == node_id:
|
|
731
|
+
n.line_end = _find_line(source, expr_end)
|
|
732
|
+
break
|
|
733
|
+
edges.extend(_extract_calls(arrow_body, node_id, rel_path, line))
|
|
734
|
+
edges.extend(_extract_hooks(arrow_body, node_id, rel_path, line))
|
|
735
|
+
edges.extend(_extract_jsx_refs(arrow_body, node_id, rel_path, line))
|
|
736
|
+
edges.extend(_extract_api_calls(arrow_body, node_id, rel_path, line))
|
|
737
|
+
|
|
738
|
+
# --- React wrapped components (memo, forwardRef, lazy) ---
|
|
739
|
+
for m in _REACT_WRAPPER.finditer(source):
|
|
740
|
+
name = m.group(1)
|
|
741
|
+
line = _find_line(source, m.start())
|
|
742
|
+
is_exp = _is_in_export(source, m.start())
|
|
743
|
+
if is_exp:
|
|
744
|
+
exported_names.add(name)
|
|
745
|
+
node_id = f"{rel_path}::{name}"
|
|
746
|
+
if node_id not in seen_node_ids:
|
|
747
|
+
seen_node_ids.add(node_id)
|
|
748
|
+
end_line = _find_block_end(source, m.start())
|
|
749
|
+
nodes.append(ParsedNode(
|
|
750
|
+
id=node_id, name=name, qualified_name=name, type="function",
|
|
751
|
+
file_path=rel_path, line_start=line, line_end=end_line,
|
|
752
|
+
is_exported=is_exp, decorators=["react_component"],
|
|
753
|
+
))
|
|
754
|
+
edges.append(ParsedEdge(
|
|
755
|
+
source=file_node_id, target=node_id,
|
|
756
|
+
type="CONTAINS", file_path=rel_path, line=line,
|
|
757
|
+
))
|
|
758
|
+
# Extract calls from wrapped component body
|
|
759
|
+
block_end = _find_block_end_pos(source, m.start())
|
|
760
|
+
if block_end is not None:
|
|
761
|
+
comp_body = source[m.start():block_end + 1]
|
|
762
|
+
edges.extend(_extract_calls(comp_body, node_id, rel_path, line))
|
|
763
|
+
edges.extend(_extract_hooks(comp_body, node_id, rel_path, line))
|
|
764
|
+
edges.extend(_extract_jsx_refs(comp_body, node_id, rel_path, line))
|
|
765
|
+
edges.extend(_extract_api_calls(comp_body, node_id, rel_path, line))
|
|
766
|
+
|
|
767
|
+
# --- HOC-wrapped components (connect, withRouter, styled, observer) ---
|
|
768
|
+
for m in _REACT_HOC.finditer(source):
|
|
769
|
+
name = m.group(1)
|
|
770
|
+
line = _find_line(source, m.start())
|
|
771
|
+
is_exp = _is_in_export(source, m.start())
|
|
772
|
+
if is_exp:
|
|
773
|
+
exported_names.add(name)
|
|
774
|
+
node_id = f"{rel_path}::{name}"
|
|
775
|
+
if node_id not in seen_node_ids:
|
|
776
|
+
seen_node_ids.add(node_id)
|
|
777
|
+
nodes.append(ParsedNode(
|
|
778
|
+
id=node_id, name=name, qualified_name=name, type="function",
|
|
779
|
+
file_path=rel_path, line_start=line, line_end=line,
|
|
780
|
+
is_exported=is_exp, decorators=["hoc_wrapped"],
|
|
781
|
+
))
|
|
782
|
+
edges.append(ParsedEdge(
|
|
783
|
+
source=file_node_id, target=node_id,
|
|
784
|
+
type="CONTAINS", file_path=rel_path, line=line,
|
|
785
|
+
))
|
|
786
|
+
|
|
787
|
+
# --- Classes ---
|
|
788
|
+
for m in _CLASS_DECL.finditer(source):
|
|
789
|
+
name = m.group(1)
|
|
790
|
+
extends = m.group(2)
|
|
791
|
+
implements = m.group(3)
|
|
792
|
+
line = _find_line(source, m.start())
|
|
793
|
+
end_line = _find_block_end(source, m.start())
|
|
794
|
+
is_exp = _is_in_export(source, m.start())
|
|
795
|
+
if is_exp:
|
|
796
|
+
exported_names.add(name)
|
|
797
|
+
node_id = f"{rel_path}::{name}"
|
|
798
|
+
class_decorators = _collect_decorators(source, m.start())
|
|
799
|
+
if node_id not in seen_node_ids:
|
|
800
|
+
seen_node_ids.add(node_id)
|
|
801
|
+
nodes.append(ParsedNode(
|
|
802
|
+
id=node_id, name=name, qualified_name=name, type="class",
|
|
803
|
+
file_path=rel_path, line_start=line, line_end=end_line,
|
|
804
|
+
is_exported=is_exp, decorators=class_decorators,
|
|
805
|
+
))
|
|
806
|
+
edges.append(ParsedEdge(
|
|
807
|
+
source=file_node_id, target=node_id,
|
|
808
|
+
type="CONTAINS", file_path=rel_path, line=line,
|
|
809
|
+
))
|
|
810
|
+
if extends:
|
|
811
|
+
edges.append(ParsedEdge(
|
|
812
|
+
source=node_id, target=extends.strip(),
|
|
813
|
+
type="EXTENDS", file_path=rel_path, line=line,
|
|
814
|
+
))
|
|
815
|
+
if implements:
|
|
816
|
+
for iface in implements.split(","):
|
|
817
|
+
iface = iface.strip()
|
|
818
|
+
if iface:
|
|
819
|
+
edges.append(ParsedEdge(
|
|
820
|
+
source=node_id, target=iface,
|
|
821
|
+
type="IMPLEMENTS", file_path=rel_path, line=line,
|
|
822
|
+
))
|
|
823
|
+
|
|
824
|
+
# Extract methods within the class body
|
|
825
|
+
block_end_pos = _find_block_end_pos(source, m.start())
|
|
826
|
+
class_body = source[m.start():block_end_pos + 1] if block_end_pos is not None else source[m.start():]
|
|
827
|
+
for method_m in _METHOD_DECL.finditer(class_body):
|
|
828
|
+
method_name = method_m.group(1)
|
|
829
|
+
if method_name in ("if", "for", "while", "switch", "catch", "return", "new"):
|
|
830
|
+
continue
|
|
831
|
+
method_params = method_m.group(3).strip() if method_m.group(3) else ""
|
|
832
|
+
method_return = method_m.group(4).strip() if method_m.group(4) else ""
|
|
833
|
+
method_line = line + _find_line(class_body, method_m.start()) - 1
|
|
834
|
+
method_sig = f"({method_params})"
|
|
835
|
+
if method_return:
|
|
836
|
+
method_sig += f" => {method_return}"
|
|
837
|
+
method_id = f"{rel_path}::{name}.{method_name}"
|
|
838
|
+
if method_id not in seen_node_ids:
|
|
839
|
+
seen_node_ids.add(method_id)
|
|
840
|
+
# Find method body for call extraction
|
|
841
|
+
method_block_end = _find_block_end_pos(class_body, method_m.start())
|
|
842
|
+
method_end_line = method_line
|
|
843
|
+
if method_block_end is not None:
|
|
844
|
+
method_body = class_body[method_m.start():method_block_end + 1]
|
|
845
|
+
method_end_line = line + _find_line(class_body, method_block_end) - 1
|
|
846
|
+
edges.extend(_extract_calls(method_body, method_id, rel_path, method_line))
|
|
847
|
+
edges.extend(_extract_hooks(method_body, method_id, rel_path, method_line))
|
|
848
|
+
edges.extend(_extract_jsx_refs(method_body, method_id, rel_path, method_line))
|
|
849
|
+
edges.extend(_extract_api_calls(method_body, method_id, rel_path, method_line))
|
|
850
|
+
nodes.append(ParsedNode(
|
|
851
|
+
id=method_id, name=method_name,
|
|
852
|
+
qualified_name=f"{name}.{method_name}", type="method",
|
|
853
|
+
file_path=rel_path, line_start=method_line, line_end=method_end_line,
|
|
854
|
+
signature=method_sig,
|
|
855
|
+
))
|
|
856
|
+
edges.append(ParsedEdge(
|
|
857
|
+
source=node_id, target=method_id,
|
|
858
|
+
type="CONTAINS", file_path=rel_path, line=method_line,
|
|
859
|
+
))
|
|
860
|
+
|
|
861
|
+
# --- Interfaces ---
|
|
862
|
+
for m in _INTERFACE_DECL.finditer(source):
|
|
863
|
+
name = m.group(1)
|
|
864
|
+
extends = m.group(2)
|
|
865
|
+
line = _find_line(source, m.start())
|
|
866
|
+
end_line = _find_block_end(source, m.start())
|
|
867
|
+
is_exp = _is_in_export(source, m.start())
|
|
868
|
+
if is_exp:
|
|
869
|
+
exported_names.add(name)
|
|
870
|
+
node_id = f"{rel_path}::{name}"
|
|
871
|
+
if node_id not in seen_node_ids:
|
|
872
|
+
seen_node_ids.add(node_id)
|
|
873
|
+
nodes.append(ParsedNode(
|
|
874
|
+
id=node_id, name=name, qualified_name=name, type="class",
|
|
875
|
+
file_path=rel_path, line_start=line, line_end=end_line,
|
|
876
|
+
is_exported=is_exp, decorators=["interface"],
|
|
877
|
+
))
|
|
878
|
+
edges.append(ParsedEdge(
|
|
879
|
+
source=file_node_id, target=node_id,
|
|
880
|
+
type="CONTAINS", file_path=rel_path, line=line,
|
|
881
|
+
))
|
|
882
|
+
if extends:
|
|
883
|
+
for base in extends.split(","):
|
|
884
|
+
base = base.strip()
|
|
885
|
+
if base:
|
|
886
|
+
edges.append(ParsedEdge(
|
|
887
|
+
source=node_id, target=base,
|
|
888
|
+
type="EXTENDS", file_path=rel_path, line=line,
|
|
889
|
+
))
|
|
890
|
+
|
|
891
|
+
# --- Type aliases ---
|
|
892
|
+
for m in _TYPE_ALIAS.finditer(source):
|
|
893
|
+
name = m.group(1)
|
|
894
|
+
line = _find_line(source, m.start())
|
|
895
|
+
is_exp = _is_in_export(source, m.start())
|
|
896
|
+
if is_exp:
|
|
897
|
+
exported_names.add(name)
|
|
898
|
+
node_id = f"{rel_path}::{name}"
|
|
899
|
+
if node_id not in seen_node_ids:
|
|
900
|
+
seen_node_ids.add(node_id)
|
|
901
|
+
nodes.append(ParsedNode(
|
|
902
|
+
id=node_id, name=name, qualified_name=name, type="variable",
|
|
903
|
+
file_path=rel_path, line_start=line, line_end=line,
|
|
904
|
+
is_exported=is_exp, decorators=["type_alias"],
|
|
905
|
+
))
|
|
906
|
+
|
|
907
|
+
# --- Enums ---
|
|
908
|
+
for m in _ENUM_DECL.finditer(source):
|
|
909
|
+
name = m.group(1)
|
|
910
|
+
line = _find_line(source, m.start())
|
|
911
|
+
end_line = _find_block_end(source, m.start())
|
|
912
|
+
is_exp = _is_in_export(source, m.start())
|
|
913
|
+
if is_exp:
|
|
914
|
+
exported_names.add(name)
|
|
915
|
+
node_id = f"{rel_path}::{name}"
|
|
916
|
+
if node_id not in seen_node_ids:
|
|
917
|
+
seen_node_ids.add(node_id)
|
|
918
|
+
nodes.append(ParsedNode(
|
|
919
|
+
id=node_id, name=name, qualified_name=name, type="class",
|
|
920
|
+
file_path=rel_path, line_start=line, line_end=end_line,
|
|
921
|
+
is_exported=is_exp, decorators=["enum"],
|
|
922
|
+
))
|
|
923
|
+
edges.append(ParsedEdge(
|
|
924
|
+
source=file_node_id, target=node_id,
|
|
925
|
+
type="CONTAINS", file_path=rel_path, line=line,
|
|
926
|
+
))
|
|
927
|
+
|
|
928
|
+
# --- Namespaces / modules ---
|
|
929
|
+
for m in _NAMESPACE_DECL.finditer(source):
|
|
930
|
+
name = m.group(1)
|
|
931
|
+
line = _find_line(source, m.start())
|
|
932
|
+
end_line = _find_block_end(source, m.start())
|
|
933
|
+
is_exp = _is_in_export(source, m.start())
|
|
934
|
+
if is_exp:
|
|
935
|
+
exported_names.add(name)
|
|
936
|
+
node_id = f"{rel_path}::{name}"
|
|
937
|
+
if node_id not in seen_node_ids:
|
|
938
|
+
seen_node_ids.add(node_id)
|
|
939
|
+
nodes.append(ParsedNode(
|
|
940
|
+
id=node_id, name=name, qualified_name=name, type="class",
|
|
941
|
+
file_path=rel_path, line_start=line, line_end=end_line,
|
|
942
|
+
is_exported=is_exp, decorators=["namespace"],
|
|
943
|
+
))
|
|
944
|
+
edges.append(ParsedEdge(
|
|
945
|
+
source=file_node_id, target=node_id,
|
|
946
|
+
type="CONTAINS", file_path=rel_path, line=line,
|
|
947
|
+
))
|
|
948
|
+
|
|
949
|
+
# Extract nested declarations inside the namespace
|
|
950
|
+
# Use patterns without ^ anchor since namespace body is indented
|
|
951
|
+
_NS_FUNC = re.compile(
|
|
952
|
+
r"""(?:export\s+)?(?:async\s+)?function\s+(\w+)\s*(?:<[^(]*>)?\s*\([^)]*\)(?:\s*:\s*[^\n{]+)?\s*\{""",
|
|
953
|
+
)
|
|
954
|
+
_NS_IFACE = re.compile(
|
|
955
|
+
r"""(?:export\s+)?interface\s+(\w+)(?:\s+extends\s+[\w.,\s]+)?\s*\{""",
|
|
956
|
+
)
|
|
957
|
+
_NS_CLASS = re.compile(
|
|
958
|
+
r"""(?:export\s+)?(?:abstract\s+)?class\s+(\w+)[^{]*\{""",
|
|
959
|
+
)
|
|
960
|
+
_NS_ENUM = re.compile(
|
|
961
|
+
r"""(?:export\s+)?(?:const\s+)?enum\s+(\w+)\s*\{""",
|
|
962
|
+
)
|
|
963
|
+
_NS_TYPE = re.compile(
|
|
964
|
+
r"""(?:export\s+)?type\s+(\w+)(?:<[^>]*>)?\s*=""",
|
|
965
|
+
)
|
|
966
|
+
block_end_pos = _find_block_end_pos(source, m.start())
|
|
967
|
+
if block_end_pos is not None:
|
|
968
|
+
# Skip the namespace header line to avoid re-matching the namespace itself
|
|
969
|
+
first_brace = source.find("{", m.start())
|
|
970
|
+
ns_body = source[first_brace + 1:block_end_pos]
|
|
971
|
+
ns_body_offset = first_brace + 1
|
|
972
|
+
for fn_m in _NS_FUNC.finditer(ns_body):
|
|
973
|
+
fn_name = fn_m.group(1)
|
|
974
|
+
fn_line = _find_line(source, ns_body_offset + fn_m.start())
|
|
975
|
+
fn_id = f"{rel_path}::{name}.{fn_name}"
|
|
976
|
+
if fn_id not in seen_node_ids:
|
|
977
|
+
seen_node_ids.add(fn_id)
|
|
978
|
+
fn_end = fn_line
|
|
979
|
+
nb_end = _find_block_end_pos(ns_body, fn_m.start())
|
|
980
|
+
if nb_end is not None:
|
|
981
|
+
fn_end = _find_line(source, ns_body_offset + nb_end)
|
|
982
|
+
nodes.append(ParsedNode(
|
|
983
|
+
id=fn_id, name=fn_name,
|
|
984
|
+
qualified_name=f"{name}.{fn_name}", type="function",
|
|
985
|
+
file_path=rel_path, line_start=fn_line, line_end=fn_end,
|
|
986
|
+
))
|
|
987
|
+
edges.append(ParsedEdge(
|
|
988
|
+
source=node_id, target=fn_id,
|
|
989
|
+
type="CONTAINS", file_path=rel_path, line=fn_line,
|
|
990
|
+
))
|
|
991
|
+
for iface_m in _NS_IFACE.finditer(ns_body):
|
|
992
|
+
iname = iface_m.group(1)
|
|
993
|
+
iline = _find_line(source, ns_body_offset + iface_m.start())
|
|
994
|
+
iid = f"{rel_path}::{name}.{iname}"
|
|
995
|
+
if iid not in seen_node_ids:
|
|
996
|
+
seen_node_ids.add(iid)
|
|
997
|
+
iend = iline
|
|
998
|
+
ib_end = _find_block_end_pos(ns_body, iface_m.start())
|
|
999
|
+
if ib_end is not None:
|
|
1000
|
+
iend = _find_line(source, ns_body_offset + ib_end)
|
|
1001
|
+
nodes.append(ParsedNode(
|
|
1002
|
+
id=iid, name=iname,
|
|
1003
|
+
qualified_name=f"{name}.{iname}", type="class",
|
|
1004
|
+
file_path=rel_path, line_start=iline, line_end=iend,
|
|
1005
|
+
decorators=["interface"],
|
|
1006
|
+
))
|
|
1007
|
+
edges.append(ParsedEdge(
|
|
1008
|
+
source=node_id, target=iid,
|
|
1009
|
+
type="CONTAINS", file_path=rel_path, line=iline,
|
|
1010
|
+
))
|
|
1011
|
+
for cls_m in _NS_CLASS.finditer(ns_body):
|
|
1012
|
+
cname = cls_m.group(1)
|
|
1013
|
+
cline = _find_line(source, ns_body_offset + cls_m.start())
|
|
1014
|
+
cid = f"{rel_path}::{name}.{cname}"
|
|
1015
|
+
if cid not in seen_node_ids:
|
|
1016
|
+
seen_node_ids.add(cid)
|
|
1017
|
+
cend = cline
|
|
1018
|
+
cb_end = _find_block_end_pos(ns_body, cls_m.start())
|
|
1019
|
+
if cb_end is not None:
|
|
1020
|
+
cend = _find_line(source, ns_body_offset + cb_end)
|
|
1021
|
+
nodes.append(ParsedNode(
|
|
1022
|
+
id=cid, name=cname,
|
|
1023
|
+
qualified_name=f"{name}.{cname}", type="class",
|
|
1024
|
+
file_path=rel_path, line_start=cline, line_end=cend,
|
|
1025
|
+
))
|
|
1026
|
+
edges.append(ParsedEdge(
|
|
1027
|
+
source=node_id, target=cid,
|
|
1028
|
+
type="CONTAINS", file_path=rel_path, line=cline,
|
|
1029
|
+
))
|
|
1030
|
+
for enum_m in _NS_ENUM.finditer(ns_body):
|
|
1031
|
+
ename = enum_m.group(1)
|
|
1032
|
+
eline = _find_line(source, ns_body_offset + enum_m.start())
|
|
1033
|
+
eid = f"{rel_path}::{name}.{ename}"
|
|
1034
|
+
if eid not in seen_node_ids:
|
|
1035
|
+
seen_node_ids.add(eid)
|
|
1036
|
+
eend = eline
|
|
1037
|
+
eb_end = _find_block_end_pos(ns_body, enum_m.start())
|
|
1038
|
+
if eb_end is not None:
|
|
1039
|
+
eend = _find_line(source, ns_body_offset + eb_end)
|
|
1040
|
+
nodes.append(ParsedNode(
|
|
1041
|
+
id=eid, name=ename,
|
|
1042
|
+
qualified_name=f"{name}.{ename}", type="class",
|
|
1043
|
+
file_path=rel_path, line_start=eline, line_end=eend,
|
|
1044
|
+
decorators=["enum"],
|
|
1045
|
+
))
|
|
1046
|
+
edges.append(ParsedEdge(
|
|
1047
|
+
source=node_id, target=eid,
|
|
1048
|
+
type="CONTAINS", file_path=rel_path, line=eline,
|
|
1049
|
+
))
|
|
1050
|
+
for type_m in _NS_TYPE.finditer(ns_body):
|
|
1051
|
+
tname = type_m.group(1)
|
|
1052
|
+
tline = _find_line(source, ns_body_offset + type_m.start())
|
|
1053
|
+
tid = f"{rel_path}::{name}.{tname}"
|
|
1054
|
+
if tid not in seen_node_ids:
|
|
1055
|
+
seen_node_ids.add(tid)
|
|
1056
|
+
nodes.append(ParsedNode(
|
|
1057
|
+
id=tid, name=tname,
|
|
1058
|
+
qualified_name=f"{name}.{tname}", type="variable",
|
|
1059
|
+
file_path=rel_path, line_start=tline, line_end=tline,
|
|
1060
|
+
decorators=["type_alias"],
|
|
1061
|
+
))
|
|
1062
|
+
|
|
1063
|
+
# --- Module-level variables (not already captured as functions/classes) ---
|
|
1064
|
+
for m in _VARIABLE_DECL.finditer(source):
|
|
1065
|
+
name = m.group(1)
|
|
1066
|
+
type_ann = m.group(2).strip() if m.group(2) else ""
|
|
1067
|
+
line = _find_line(source, m.start())
|
|
1068
|
+
node_id = f"{rel_path}::{name}"
|
|
1069
|
+
if node_id in seen_node_ids:
|
|
1070
|
+
continue
|
|
1071
|
+
# Skip if this is inside a class/function body (indented)
|
|
1072
|
+
line_start_pos = source.rfind("\n", 0, m.start()) + 1
|
|
1073
|
+
indent = m.start() - line_start_pos
|
|
1074
|
+
if indent > 2:
|
|
1075
|
+
continue
|
|
1076
|
+
is_exp = _is_in_export(source, m.start())
|
|
1077
|
+
if is_exp:
|
|
1078
|
+
exported_names.add(name)
|
|
1079
|
+
seen_node_ids.add(node_id)
|
|
1080
|
+
decorators = []
|
|
1081
|
+
if type_ann:
|
|
1082
|
+
decorators.append(f"type:{type_ann[:50]}")
|
|
1083
|
+
nodes.append(ParsedNode(
|
|
1084
|
+
id=node_id, name=name, qualified_name=name, type="variable",
|
|
1085
|
+
file_path=rel_path, line_start=line, line_end=line,
|
|
1086
|
+
is_exported=is_exp, decorators=decorators,
|
|
1087
|
+
))
|
|
1088
|
+
edges.append(ParsedEdge(
|
|
1089
|
+
source=file_node_id, target=node_id,
|
|
1090
|
+
type="CONTAINS", file_path=rel_path, line=line,
|
|
1091
|
+
))
|
|
1092
|
+
|
|
1093
|
+
# --- Object property methods (const obj = { method() {} }) ---
|
|
1094
|
+
# Look for variable assignments whose RHS starts with `{`
|
|
1095
|
+
_OBJ_LITERAL = re.compile(
|
|
1096
|
+
r"""^(?:export\s+)?(?:const|let|var)\s+(\w+)\s*(?::\s*[^=]+?)?\s*=\s*\{""",
|
|
1097
|
+
re.MULTILINE,
|
|
1098
|
+
)
|
|
1099
|
+
for m in _OBJ_LITERAL.finditer(source):
|
|
1100
|
+
obj_name = m.group(1)
|
|
1101
|
+
obj_node_id = f"{rel_path}::{obj_name}"
|
|
1102
|
+
# Only process if this is a top-level variable (not already a known function)
|
|
1103
|
+
line_start_pos = source.rfind("\n", 0, m.start()) + 1
|
|
1104
|
+
indent = m.start() - line_start_pos
|
|
1105
|
+
if indent > 2:
|
|
1106
|
+
continue
|
|
1107
|
+
block_end_pos = _find_block_end_pos(source, m.start())
|
|
1108
|
+
if block_end_pos is None:
|
|
1109
|
+
continue
|
|
1110
|
+
obj_body = source[m.start():block_end_pos + 1]
|
|
1111
|
+
obj_line = _find_line(source, m.start())
|
|
1112
|
+
# Find methods: name(...) { or name: (...) =>
|
|
1113
|
+
for om in _OBJ_METHOD.finditer(obj_body):
|
|
1114
|
+
mname = om.group(1)
|
|
1115
|
+
if mname in ("if", "for", "while", "switch", "catch", "return", "new", "get", "set"):
|
|
1116
|
+
continue
|
|
1117
|
+
mline = obj_line + _find_line(obj_body, om.start()) - 1
|
|
1118
|
+
mid = f"{rel_path}::{obj_name}.{mname}"
|
|
1119
|
+
if mid not in seen_node_ids:
|
|
1120
|
+
seen_node_ids.add(mid)
|
|
1121
|
+
mend = mline
|
|
1122
|
+
mb_end = _find_block_end_pos(obj_body, om.start())
|
|
1123
|
+
if mb_end is not None:
|
|
1124
|
+
mend = obj_line + _find_line(obj_body, mb_end) - 1
|
|
1125
|
+
nodes.append(ParsedNode(
|
|
1126
|
+
id=mid, name=mname,
|
|
1127
|
+
qualified_name=f"{obj_name}.{mname}", type="method",
|
|
1128
|
+
file_path=rel_path, line_start=mline, line_end=mend,
|
|
1129
|
+
))
|
|
1130
|
+
edges.append(ParsedEdge(
|
|
1131
|
+
source=obj_node_id, target=mid,
|
|
1132
|
+
type="CONTAINS", file_path=rel_path, line=mline,
|
|
1133
|
+
))
|
|
1134
|
+
for om in _OBJ_METHOD_ARROW.finditer(obj_body):
|
|
1135
|
+
mname = om.group(1)
|
|
1136
|
+
mline = obj_line + _find_line(obj_body, om.start()) - 1
|
|
1137
|
+
mid = f"{rel_path}::{obj_name}.{mname}"
|
|
1138
|
+
if mid not in seen_node_ids:
|
|
1139
|
+
seen_node_ids.add(mid)
|
|
1140
|
+
nodes.append(ParsedNode(
|
|
1141
|
+
id=mid, name=mname,
|
|
1142
|
+
qualified_name=f"{obj_name}.{mname}", type="method",
|
|
1143
|
+
file_path=rel_path, line_start=mline, line_end=mline,
|
|
1144
|
+
))
|
|
1145
|
+
edges.append(ParsedEdge(
|
|
1146
|
+
source=obj_node_id, target=mid,
|
|
1147
|
+
type="CONTAINS", file_path=rel_path, line=mline,
|
|
1148
|
+
))
|
|
1149
|
+
|
|
1150
|
+
# --- Navigation screens ---
|
|
1151
|
+
for m in _NAV_NAVIGATOR.finditer(source):
|
|
1152
|
+
name = m.group(1)
|
|
1153
|
+
line = _find_line(source, m.start())
|
|
1154
|
+
node_id = f"{rel_path}::{name}"
|
|
1155
|
+
if node_id not in seen_node_ids:
|
|
1156
|
+
seen_node_ids.add(node_id)
|
|
1157
|
+
nodes.append(ParsedNode(
|
|
1158
|
+
id=node_id, name=name, qualified_name=name, type="variable",
|
|
1159
|
+
file_path=rel_path, line_start=line, line_end=line,
|
|
1160
|
+
decorators=["navigator"],
|
|
1161
|
+
))
|
|
1162
|
+
|
|
1163
|
+
for m in _NAV_SCREEN.finditer(source):
|
|
1164
|
+
screen_name = m.group(1)
|
|
1165
|
+
line = _find_line(source, m.start())
|
|
1166
|
+
edges.append(ParsedEdge(
|
|
1167
|
+
source=file_node_id, target=screen_name,
|
|
1168
|
+
type="CALLS", file_path=rel_path, line=line,
|
|
1169
|
+
))
|
|
1170
|
+
|
|
1171
|
+
return ParsedFile(
|
|
1172
|
+
path=rel_path,
|
|
1173
|
+
content_hash=hash_val,
|
|
1174
|
+
nodes=nodes,
|
|
1175
|
+
edges=edges,
|
|
1176
|
+
line_count=line_count,
|
|
1177
|
+
)
|
|
1178
|
+
|
|
1179
|
+
|
|
1180
|
+
class TypeScriptParser(BaseParser):
    """Adapter that exposes the regex-based TS/JS parser through the BaseParser API.

    All real work is delegated to the module-level ``parse_typescript_file``
    function; this class only declares which file extensions it handles.
    """

    # Extensions this parser claims; frozenset keeps the set immutable.
    _TS_EXTENSIONS = frozenset((".ts", ".tsx", ".js", ".jsx"))

    def extensions(self) -> frozenset[str]:
        """Return the file extensions (with leading dot) handled by this parser."""
        return TypeScriptParser._TS_EXTENSIONS

    def parse(self, path: Path, repo_root: Path) -> ParsedFile:
        """Parse *path* (resolved against *repo_root*) into a ParsedFile.

        Delegates directly to the module-level ``parse_typescript_file``.
        """
        return parse_typescript_file(path, repo_root)