aurora-lsp 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aurora_lsp/__init__.py +28 -0
- aurora_lsp/analysis.py +485 -0
- aurora_lsp/client.py +305 -0
- aurora_lsp/diagnostics.py +207 -0
- aurora_lsp/facade.py +402 -0
- aurora_lsp/filters.py +195 -0
- aurora_lsp-0.1.0.dist-info/METADATA +195 -0
- aurora_lsp-0.1.0.dist-info/RECORD +9 -0
- aurora_lsp-0.1.0.dist-info/WHEEL +4 -0
aurora_lsp/__init__.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
"""Aurora LSP - Code intelligence powered by Language Server Protocol.
|
|
2
|
+
|
|
3
|
+
Provides:
|
|
4
|
+
- Find usages (excluding imports)
|
|
5
|
+
- Dead code detection
|
|
6
|
+
- Linting diagnostics
|
|
7
|
+
- Call hierarchy (where supported)
|
|
8
|
+
|
|
9
|
+
Built on multilspy (Microsoft) with custom import filtering and analysis layers.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from aurora_lsp.analysis import CodeAnalyzer, SymbolKind
|
|
13
|
+
from aurora_lsp.client import AuroraLSPClient
|
|
14
|
+
from aurora_lsp.diagnostics import DiagnosticsFormatter
|
|
15
|
+
from aurora_lsp.facade import AuroraLSP
|
|
16
|
+
from aurora_lsp.filters import ImportFilter
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
__all__ = [
|
|
20
|
+
"AuroraLSP",
|
|
21
|
+
"AuroraLSPClient",
|
|
22
|
+
"CodeAnalyzer",
|
|
23
|
+
"ImportFilter",
|
|
24
|
+
"DiagnosticsFormatter",
|
|
25
|
+
"SymbolKind",
|
|
26
|
+
]
|
|
27
|
+
|
|
28
|
+
__version__ = "0.1.0"
|
aurora_lsp/analysis.py
ADDED
|
@@ -0,0 +1,485 @@
|
|
|
1
|
+
"""Code analysis layer - dead code detection, usage summary, call hierarchy.
|
|
2
|
+
|
|
3
|
+
Built on top of LSP client and import filtering.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
from enum import IntEnum
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import TYPE_CHECKING
|
|
12
|
+
|
|
13
|
+
from aurora_lsp.filters import ImportFilter, get_filter_for_file
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
if TYPE_CHECKING:
|
|
17
|
+
from aurora_lsp.client import AuroraLSPClient
|
|
18
|
+
|
|
19
|
+
logger = logging.getLogger(__name__)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class SymbolKind(IntEnum):
    """LSP SymbolKind values (from LSP specification).

    The numeric values are fixed by the Language Server Protocol spec and
    must match what servers send over the wire — do not renumber. Do not
    reorder members either: enum iteration order follows definition order.
    """

    FILE = 1
    MODULE = 2
    NAMESPACE = 3
    PACKAGE = 4
    CLASS = 5
    METHOD = 6
    PROPERTY = 7
    FIELD = 8
    CONSTRUCTOR = 9
    ENUM = 10
    INTERFACE = 11
    FUNCTION = 12
    VARIABLE = 13
    CONSTANT = 14
    STRING = 15
    NUMBER = 16
    BOOLEAN = 17
    ARRAY = 18
    OBJECT = 19
    KEY = 20
    NULL = 21
    ENUM_MEMBER = 22
    STRUCT = 23
    EVENT = 24
    OPERATOR = 25
    TYPE_PARAMETER = 26
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class CodeAnalyzer:
    """High-level code analysis using LSP.

    Provides:
    - Find usages (excluding imports)
    - Dead code detection
    - Usage summary with impact assessment
    - Call hierarchy (callers/callees)
    """

    # Symbol kinds that can have usages (functions, classes, methods)
    ANALYZABLE_KINDS = {
        SymbolKind.FUNCTION,
        SymbolKind.CLASS,
        SymbolKind.METHOD,
        SymbolKind.INTERFACE,
        SymbolKind.ENUM,
    }

    # Well-known typing/stdlib names that language servers (e.g. pyright)
    # sometimes report as CLASS symbols when they are really imported names.
    # Hoisted to a class-level constant so find_dead_code() does not rebuild
    # the set once per symbol inside its loop.
    _TYPE_IMPORT_NAMES = frozenset({
        "Any", "Optional", "Union", "List", "Dict", "Set", "Tuple",
        "Callable", "Iterator", "Iterable", "Generator", "Sequence",
        "Mapping", "MutableMapping", "Type", "TypeVar", "Generic",
        "Protocol", "Path", "datetime", "timezone", "timedelta",
        "cast", "overload", "TYPE_CHECKING",
    })

    def __init__(self, client: AuroraLSPClient, workspace: Path | str):
        """Initialize analyzer.

        Args:
            client: LSP client for making requests.
            workspace: Workspace root directory.
        """
        self.client = client
        self.workspace = Path(workspace).resolve()
        # File content cache (path string -> list of lines). Never
        # invalidated, so entries go stale if files change mid-session.
        self._file_cache: dict[str, list[str]] = {}

    async def find_usages(
        self,
        file_path: str | Path,
        line: int,
        col: int,
        include_imports: bool = False,
    ) -> dict:
        """Find usages of a symbol, optionally filtering imports.

        Args:
            file_path: Path to file containing the symbol.
            line: Line number (0-indexed).
            col: Column number (0-indexed).
            include_imports: Whether to include import statements.

        Returns:
            Dict with 'usages', 'imports', 'total_usages', 'total_imports'.
        """
        # Get all references from LSP
        refs = await self.client.request_references(file_path, line, col)

        if not refs:
            return {
                "usages": [],
                "imports": [],
                "total_usages": 0,
                "total_imports": 0,
            }

        # Separate real usages from import statements
        import_filter = get_filter_for_file(file_path)
        usages, imports = await import_filter.filter_references(
            refs, self._read_line
        )

        if not include_imports:
            return {
                "usages": usages,
                "imports": imports,
                "total_usages": len(usages),
                "total_imports": len(imports),
            }

        # Caller asked for imports too: fold them into the usage list and
        # count, while still reporting them separately for display.
        return {
            "usages": usages + imports,
            "imports": imports,
            "total_usages": len(usages) + len(imports),
            "total_imports": len(imports),
        }

    async def get_usage_summary(
        self,
        file_path: str | Path,
        line: int,
        col: int,
        symbol_name: str | None = None,
    ) -> dict:
        """Get comprehensive usage summary for a symbol.

        Args:
            file_path: Path to file containing the symbol.
            line: Line number (0-indexed).
            col: Column number (0-indexed).
            symbol_name: Optional symbol name for display.

        Returns:
            Dict with usage counts, impact level, and grouped usages.
        """
        result = await self.find_usages(file_path, line, col, include_imports=False)
        usages = result["usages"]
        imports = result["imports"]

        # Calculate impact: >10 usages = high, 3-10 = medium, <3 = low
        count = len(usages)
        if count > 10:
            impact = "high"
        elif count >= 3:
            impact = "medium"
        else:
            impact = "low"

        # Group usages by file for the files_affected count and display
        by_file: dict[str, list[dict]] = {}
        for u in usages:
            by_file.setdefault(u.get("file", "unknown"), []).append(u)

        return {
            "symbol": symbol_name,
            "total_usages": count,
            "total_imports": len(imports),
            "impact": impact,
            "files_affected": len(by_file),
            "usages_by_file": by_file,
            "usages": usages[:20],  # Limit for display
            "imports": imports,
        }

    async def find_dead_code(
        self,
        path: str | Path | None = None,
        include_private: bool = False,
    ) -> list[dict]:
        """Find functions/classes with 0 usages (excluding imports).

        Args:
            path: Directory or file to analyze. Defaults to workspace.
            include_private: Whether to include private symbols (_name).

        Returns:
            List of dead code items with file, line, name, kind.
        """
        dead = []
        files = self._get_source_files(path)

        for file_path in files:
            try:
                symbols = await self.client.request_document_symbols(file_path)
                if not symbols:
                    continue

                # Get import filter for this file type
                import_filter = get_filter_for_file(file_path)

                for symbol in self._flatten_symbols(symbols):
                    # Only check analyzable symbol kinds
                    kind = symbol.get("kind")
                    if kind not in self.ANALYZABLE_KINDS:
                        continue

                    name = symbol.get("name", "")

                    # Skip private/dunder unless requested
                    if not include_private and name.startswith("_"):
                        continue

                    # Skip test functions
                    if name.startswith("test_"):
                        continue

                    # Get symbol location - prefer selectionRange for accurate
                    # position: selectionRange gives the actual symbol name
                    # position, while range gives the full extent (often
                    # starts at column 0).
                    sel_range = symbol.get("selectionRange", symbol.get("range", {}))
                    start = sel_range.get("start", {})
                    sym_line = start.get("line", 0)
                    sym_col = start.get("character", 0)

                    # Skip imported symbols (not actual definitions in this
                    # file). These appear when pyright reports imported
                    # names as symbols.
                    line_content = await self._read_line(str(file_path), sym_line)
                    if import_filter.is_import_line(line_content):
                        continue

                    # Skip symbols that look like type imports (common
                    # pattern): well-known names near the top of the file.
                    if kind == SymbolKind.CLASS and sym_line < 50:
                        if name in self._TYPE_IMPORT_NAMES:
                            continue

                    # Find usages
                    usage_result = await self.find_usages(
                        file_path, sym_line, sym_col, include_imports=False
                    )
                    usages = usage_result["usages"]

                    # Exclude the definition itself
                    usages = [
                        u for u in usages
                        if not self._is_same_location(u, file_path, sym_line)
                    ]

                    if len(usages) == 0:
                        dead.append({
                            "file": str(file_path),
                            "line": sym_line,
                            "name": name,
                            "kind": SymbolKind(kind).name.lower(),
                            "imports": usage_result["total_imports"],
                        })

            except Exception as e:
                # Best-effort per file: one bad file must not abort the scan.
                # Lazy %-style args so formatting is skipped when not logged.
                logger.warning("Error analyzing %s: %s", file_path, e)
                continue

        return dead

    async def get_callers(
        self,
        file_path: str | Path,
        line: int,
        col: int,
    ) -> list[dict]:
        """Find functions that call this symbol (incoming calls).

        Uses references and determines containing function for each.

        Args:
            file_path: Path to file containing the symbol.
            line: Line number (0-indexed).
            col: Column number (0-indexed).

        Returns:
            List of caller functions with file, line, name.
        """
        # Get usages (excluding imports)
        usage_result = await self.find_usages(file_path, line, col, include_imports=False)
        usages = usage_result["usages"]

        callers = []
        seen = set()

        for ref in usages:
            ref_file = ref.get("file", "")
            ref_line = ref.get("line", 0)

            # Skip the definition itself
            if self._is_same_location(ref, file_path, line):
                continue

            # Get containing symbol
            container = await self._get_containing_symbol(ref_file, ref_line)
            if container:
                # Deduplicate by container
                key = (container["file"], container["line"], container["name"])
                if key not in seen:
                    seen.add(key)
                    callers.append(container)

        return callers

    async def get_callees(
        self,
        file_path: str | Path,
        line: int,
        col: int,
    ) -> list[dict]:
        """Find functions called by this symbol (outgoing calls).

        Note: This is limited without full AST parsing. Returns empty
        list if call hierarchy is not supported by the language server.

        Args:
            file_path: Path to file containing the symbol.
            line: Line number (0-indexed).
            col: Column number (0-indexed).

        Returns:
            List of called functions (may be empty).
        """
        # Outgoing calls require parsing the function body
        # which is beyond basic LSP. Return empty for now.
        # Could be implemented with tree-sitter in the future.
        return []

    async def _get_containing_symbol(
        self,
        file_path: str | Path,
        line: int,
    ) -> dict | None:
        """Get the function/method containing a line.

        Args:
            file_path: Path to file.
            line: Line number to find container for.

        Returns:
            Symbol dict with file, line, name, or None.
        """
        try:
            symbols = await self.client.request_document_symbols(file_path)
            if not symbols:
                return None

            # Find deepest symbol containing the line
            containing = None

            for symbol in self._flatten_symbols(symbols):
                kind = symbol.get("kind")
                if kind not in {SymbolKind.FUNCTION, SymbolKind.METHOD}:
                    continue

                range_info = symbol.get("range", {})
                start = range_info.get("start", {}).get("line", 0)
                end = range_info.get("end", {}).get("line", 0)

                if start <= line <= end:
                    # Prefer the most specific (deepest) container: a nested
                    # function starts later than its enclosing one.
                    if containing is None or start > containing.get("line", 0):
                        containing = {
                            "file": str(file_path),
                            "line": start,
                            "name": symbol.get("name", ""),
                            "kind": SymbolKind(kind).name.lower(),
                        }

            return containing

        except Exception as e:
            logger.debug("Error getting containing symbol: %s", e)
            return None

    def _flatten_symbols(self, symbols: list) -> list[dict]:
        """Flatten nested symbol tree from LSP.

        Args:
            symbols: Nested symbol list from document_symbols.

        Returns:
            Flat list of all symbols (pre-order: parent before children).
        """
        result = []
        for s in symbols:
            result.append(s)
            children = s.get("children", [])
            if children:
                result.extend(self._flatten_symbols(children))
        return result

    def _get_source_files(self, path: str | Path | None = None) -> list[Path]:
        """Get all source files in a directory.

        Args:
            path: Directory or file path. Defaults to workspace.

        Returns:
            Sorted list of source file paths.
        """
        target = Path(path) if path else self.workspace

        if target.is_file():
            return [target]

        # Supported extensions
        extensions = {".py", ".js", ".ts", ".jsx", ".tsx", ".go", ".rs", ".java", ".rb"}

        files = []
        for ext in extensions:
            files.extend(target.rglob(f"*{ext}"))

        # Filter out common non-source directories
        exclude_dirs = {"node_modules", ".git", "__pycache__", ".venv", "venv", "dist", "build"}
        files = [
            f for f in files
            if not any(d in f.parts for d in exclude_dirs)
        ]

        return sorted(files)

    def _is_same_location(
        self,
        ref: dict,
        file_path: str | Path,
        line: int,
    ) -> bool:
        """Check if a reference is at the same location as given file/line.

        Only the start line is compared (not the column), which is enough
        to identify the definition line itself.
        """
        ref_file = Path(ref.get("file", ""))
        target_file = Path(file_path)

        # Normalize paths so relative vs absolute spellings still match
        try:
            ref_file = ref_file.resolve()
            target_file = target_file.resolve()
        except Exception:
            pass

        return ref_file == target_file and ref.get("line") == line

    async def _read_line(self, file_path: str, line: int) -> str:
        """Read a specific line from a file (async wrapper).

        Args:
            file_path: Path to file.
            line: Line number (0-indexed).

        Returns:
            Line content, or "" if the file or line is unavailable.
        """
        # Use cached file content if available
        if file_path not in self._file_cache:
            try:
                path = Path(file_path)
                if not path.is_absolute():
                    path = self.workspace / path
                content = path.read_text(encoding="utf-8", errors="replace")
                self._file_cache[file_path] = content.splitlines()
            except Exception:
                # Unreadable file: treat as empty rather than propagate
                return ""

        lines = self._file_cache[file_path]
        if 0 <= line < len(lines):
            return lines[line]
        return ""
|