roma-debug 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- roma_debug/__init__.py +3 -0
- roma_debug/config.py +79 -0
- roma_debug/core/__init__.py +5 -0
- roma_debug/core/engine.py +423 -0
- roma_debug/core/models.py +313 -0
- roma_debug/main.py +753 -0
- roma_debug/parsers/__init__.py +21 -0
- roma_debug/parsers/base.py +189 -0
- roma_debug/parsers/python_ast_parser.py +268 -0
- roma_debug/parsers/registry.py +196 -0
- roma_debug/parsers/traceback_patterns.py +314 -0
- roma_debug/parsers/treesitter_parser.py +598 -0
- roma_debug/prompts.py +153 -0
- roma_debug/server.py +247 -0
- roma_debug/tracing/__init__.py +28 -0
- roma_debug/tracing/call_chain.py +278 -0
- roma_debug/tracing/context_builder.py +672 -0
- roma_debug/tracing/dependency_graph.py +298 -0
- roma_debug/tracing/error_analyzer.py +399 -0
- roma_debug/tracing/import_resolver.py +315 -0
- roma_debug/tracing/project_scanner.py +569 -0
- roma_debug/utils/__init__.py +5 -0
- roma_debug/utils/context.py +422 -0
- roma_debug-0.1.0.dist-info/METADATA +34 -0
- roma_debug-0.1.0.dist-info/RECORD +36 -0
- roma_debug-0.1.0.dist-info/WHEEL +5 -0
- roma_debug-0.1.0.dist-info/entry_points.txt +2 -0
- roma_debug-0.1.0.dist-info/licenses/LICENSE +201 -0
- roma_debug-0.1.0.dist-info/top_level.txt +2 -0
- tests/__init__.py +1 -0
- tests/test_context.py +208 -0
- tests/test_engine.py +296 -0
- tests/test_parsers.py +534 -0
- tests/test_project_scanner.py +275 -0
- tests/test_traceback_patterns.py +222 -0
- tests/test_tracing.py +296 -0
|
@@ -0,0 +1,672 @@
|
|
|
1
|
+
"""Context builder for deep debugging.
|
|
2
|
+
|
|
3
|
+
Assembles comprehensive context for AI analysis by combining
|
|
4
|
+
traceback information, import resolution, and dependency analysis.
|
|
5
|
+
Includes project scanning for deep project awareness.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import os
import re
from pathlib import Path
from typing import Optional, List, Tuple

from roma_debug.core.models import (
    Language, Import, FileContext, UpstreamContext, AnalysisContext,
    ParsedTraceback, Symbol
)
from roma_debug.parsers.registry import get_parser, detect_language
from roma_debug.parsers.traceback_patterns import parse_traceback
from roma_debug.tracing.import_resolver import ImportResolver
from roma_debug.tracing.dependency_graph import DependencyGraph
from roma_debug.tracing.call_chain import CallChainAnalyzer, CallChain
from roma_debug.tracing.project_scanner import ProjectScanner, ProjectInfo
from roma_debug.tracing.error_analyzer import ErrorAnalyzer, ErrorAnalysis
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class ContextBuilder:
|
|
26
|
+
"""Builds comprehensive context for AI-powered debugging.
|
|
27
|
+
|
|
28
|
+
Coordinates all the analysis components to produce rich context
|
|
29
|
+
for the AI to understand errors and suggest fixes.
|
|
30
|
+
"""
|
|
31
|
+
|
|
32
|
+
def __init__(
|
|
33
|
+
self,
|
|
34
|
+
project_root: Optional[str] = None,
|
|
35
|
+
max_upstream_files: int = 5,
|
|
36
|
+
max_context_lines: int = 100,
|
|
37
|
+
scan_project: bool = True,
|
|
38
|
+
):
|
|
39
|
+
"""Initialize the context builder.
|
|
40
|
+
|
|
41
|
+
Args:
|
|
42
|
+
project_root: Root directory of the project
|
|
43
|
+
max_upstream_files: Maximum upstream files to include
|
|
44
|
+
max_context_lines: Maximum lines per context snippet
|
|
45
|
+
scan_project: Whether to scan project structure on init
|
|
46
|
+
"""
|
|
47
|
+
self.project_root = Path(project_root) if project_root else Path.cwd()
|
|
48
|
+
self.max_upstream_files = max_upstream_files
|
|
49
|
+
self.max_context_lines = max_context_lines
|
|
50
|
+
|
|
51
|
+
self.import_resolver = ImportResolver(str(self.project_root))
|
|
52
|
+
self.dependency_graph = DependencyGraph(str(self.project_root))
|
|
53
|
+
self.call_chain_analyzer = CallChainAnalyzer(str(self.project_root))
|
|
54
|
+
|
|
55
|
+
# Project scanner for deep awareness
|
|
56
|
+
self.project_scanner = ProjectScanner(str(self.project_root))
|
|
57
|
+
self.error_analyzer = ErrorAnalyzer(self.project_scanner)
|
|
58
|
+
self._project_info: Optional[ProjectInfo] = None
|
|
59
|
+
|
|
60
|
+
if scan_project:
|
|
61
|
+
self._project_info = self.project_scanner.scan()
|
|
62
|
+
|
|
63
|
+
@property
|
|
64
|
+
def project_info(self) -> ProjectInfo:
|
|
65
|
+
"""Get project info, scanning if needed."""
|
|
66
|
+
if self._project_info is None:
|
|
67
|
+
self._project_info = self.project_scanner.scan()
|
|
68
|
+
return self._project_info
|
|
69
|
+
|
|
70
|
+
def build_analysis_context(
|
|
71
|
+
self,
|
|
72
|
+
error_log: str,
|
|
73
|
+
file_contexts: Optional[List[FileContext]] = None,
|
|
74
|
+
language_hint: Optional[Language] = None,
|
|
75
|
+
) -> AnalysisContext:
|
|
76
|
+
"""Build complete analysis context from an error log.
|
|
77
|
+
|
|
78
|
+
Args:
|
|
79
|
+
error_log: The error log/traceback string
|
|
80
|
+
file_contexts: Optional pre-extracted file contexts
|
|
81
|
+
language_hint: Optional language hint
|
|
82
|
+
|
|
83
|
+
Returns:
|
|
84
|
+
AnalysisContext ready for AI prompt
|
|
85
|
+
"""
|
|
86
|
+
# Parse the traceback
|
|
87
|
+
traceback = parse_traceback(error_log, language_hint)
|
|
88
|
+
|
|
89
|
+
# Get file contexts if not provided
|
|
90
|
+
if file_contexts is None:
|
|
91
|
+
file_contexts = self._extract_file_contexts(traceback)
|
|
92
|
+
|
|
93
|
+
if not file_contexts:
|
|
94
|
+
# No file contexts - return minimal context
|
|
95
|
+
return self._create_minimal_context(error_log, traceback)
|
|
96
|
+
|
|
97
|
+
# Determine primary context (usually the error location)
|
|
98
|
+
primary_context = self._get_primary_context(file_contexts, traceback)
|
|
99
|
+
|
|
100
|
+
# Resolve imports and build dependency graph
|
|
101
|
+
for ctx in file_contexts:
|
|
102
|
+
resolved_imports = self.import_resolver.resolve_imports(
|
|
103
|
+
ctx.imports,
|
|
104
|
+
Path(ctx.filepath),
|
|
105
|
+
)
|
|
106
|
+
ctx.imports = resolved_imports
|
|
107
|
+
self.dependency_graph.add_file_context(ctx)
|
|
108
|
+
|
|
109
|
+
# Build upstream context for deep debugging
|
|
110
|
+
upstream_context = self._build_upstream_context(
|
|
111
|
+
primary_context,
|
|
112
|
+
file_contexts,
|
|
113
|
+
traceback,
|
|
114
|
+
)
|
|
115
|
+
|
|
116
|
+
return AnalysisContext(
|
|
117
|
+
primary_context=primary_context,
|
|
118
|
+
traceback_contexts=file_contexts,
|
|
119
|
+
upstream_context=upstream_context,
|
|
120
|
+
parsed_traceback=traceback,
|
|
121
|
+
project_root=str(self.project_root),
|
|
122
|
+
)
|
|
123
|
+
|
|
124
|
+
def _extract_file_contexts(self, traceback: ParsedTraceback) -> List[FileContext]:
|
|
125
|
+
"""Extract file contexts from traceback frames.
|
|
126
|
+
|
|
127
|
+
Args:
|
|
128
|
+
traceback: Parsed traceback
|
|
129
|
+
|
|
130
|
+
Returns:
|
|
131
|
+
List of FileContext objects
|
|
132
|
+
"""
|
|
133
|
+
contexts = []
|
|
134
|
+
|
|
135
|
+
for frame in traceback.frames:
|
|
136
|
+
context = self._extract_single_context(
|
|
137
|
+
frame.filepath,
|
|
138
|
+
frame.line_number,
|
|
139
|
+
traceback.language,
|
|
140
|
+
)
|
|
141
|
+
if context and context.context_type != "missing":
|
|
142
|
+
contexts.append(context)
|
|
143
|
+
|
|
144
|
+
return contexts
|
|
145
|
+
|
|
146
|
+
def _extract_single_context(
|
|
147
|
+
self,
|
|
148
|
+
filepath: str,
|
|
149
|
+
line_number: int,
|
|
150
|
+
language: Language,
|
|
151
|
+
) -> Optional[FileContext]:
|
|
152
|
+
"""Extract context from a single file.
|
|
153
|
+
|
|
154
|
+
Args:
|
|
155
|
+
filepath: Path to the file
|
|
156
|
+
line_number: Error line number
|
|
157
|
+
language: Language of the file
|
|
158
|
+
|
|
159
|
+
Returns:
|
|
160
|
+
FileContext or None if file not found
|
|
161
|
+
"""
|
|
162
|
+
# Try to resolve the file path
|
|
163
|
+
resolved_path = self._resolve_file_path(filepath)
|
|
164
|
+
if not resolved_path:
|
|
165
|
+
return FileContext(
|
|
166
|
+
filepath=filepath,
|
|
167
|
+
line_number=line_number,
|
|
168
|
+
context_type="missing",
|
|
169
|
+
content=f"[File not found: {filepath}]",
|
|
170
|
+
language=language,
|
|
171
|
+
)
|
|
172
|
+
|
|
173
|
+
try:
|
|
174
|
+
source = Path(resolved_path).read_text(encoding='utf-8', errors='replace')
|
|
175
|
+
lines = source.splitlines()
|
|
176
|
+
except Exception as e:
|
|
177
|
+
return FileContext(
|
|
178
|
+
filepath=filepath,
|
|
179
|
+
line_number=line_number,
|
|
180
|
+
context_type="missing",
|
|
181
|
+
content=f"[Error reading file: {e}]",
|
|
182
|
+
language=language,
|
|
183
|
+
)
|
|
184
|
+
|
|
185
|
+
# Detect language if unknown
|
|
186
|
+
if language == Language.UNKNOWN:
|
|
187
|
+
language = detect_language(resolved_path)
|
|
188
|
+
|
|
189
|
+
# Try parser-based extraction
|
|
190
|
+
parser = get_parser(language, create_new=True)
|
|
191
|
+
if parser and parser.parse(source, resolved_path):
|
|
192
|
+
symbol = parser.find_enclosing_symbol(line_number)
|
|
193
|
+
imports = parser.extract_imports()
|
|
194
|
+
|
|
195
|
+
if symbol:
|
|
196
|
+
start = max(1, symbol.start_line - 2)
|
|
197
|
+
end = min(len(lines), symbol.end_line + 2)
|
|
198
|
+
snippet = parser.format_snippet(start, end, highlight_line=line_number)
|
|
199
|
+
|
|
200
|
+
return FileContext(
|
|
201
|
+
filepath=resolved_path,
|
|
202
|
+
line_number=line_number,
|
|
203
|
+
context_type="ast" if language == Language.PYTHON else "treesitter",
|
|
204
|
+
content=snippet,
|
|
205
|
+
function_name=symbol.name if symbol.kind in ("function", "method") else None,
|
|
206
|
+
class_name=symbol.parent.name if symbol.parent and symbol.parent.kind == "class" else None,
|
|
207
|
+
language=language,
|
|
208
|
+
imports=imports,
|
|
209
|
+
symbol=symbol,
|
|
210
|
+
raw_source=source,
|
|
211
|
+
)
|
|
212
|
+
|
|
213
|
+
# Fallback to line-based extraction
|
|
214
|
+
return self._line_based_context(resolved_path, lines, line_number, language, imports)
|
|
215
|
+
|
|
216
|
+
# Fallback if parser fails
|
|
217
|
+
return self._line_based_context(resolved_path, lines, line_number, language)
|
|
218
|
+
|
|
219
|
+
def _line_based_context(
|
|
220
|
+
self,
|
|
221
|
+
filepath: str,
|
|
222
|
+
lines: List[str],
|
|
223
|
+
line_number: int,
|
|
224
|
+
language: Language,
|
|
225
|
+
imports: Optional[List[Import]] = None,
|
|
226
|
+
) -> FileContext:
|
|
227
|
+
"""Create context using line-based extraction.
|
|
228
|
+
|
|
229
|
+
Args:
|
|
230
|
+
filepath: Path to the file
|
|
231
|
+
lines: Source lines
|
|
232
|
+
line_number: Error line number
|
|
233
|
+
language: Language of the file
|
|
234
|
+
imports: Optional pre-extracted imports
|
|
235
|
+
|
|
236
|
+
Returns:
|
|
237
|
+
FileContext with line-based content
|
|
238
|
+
"""
|
|
239
|
+
context_lines = min(50, self.max_context_lines // 2)
|
|
240
|
+
start = max(1, line_number - context_lines)
|
|
241
|
+
end = min(len(lines), line_number + context_lines)
|
|
242
|
+
|
|
243
|
+
snippet_lines = []
|
|
244
|
+
for i in range(start - 1, end):
|
|
245
|
+
num = i + 1
|
|
246
|
+
marker = " >> " if num == line_number else " "
|
|
247
|
+
snippet_lines.append(f"{marker}{num:4d} | {lines[i]}")
|
|
248
|
+
|
|
249
|
+
return FileContext(
|
|
250
|
+
filepath=filepath,
|
|
251
|
+
line_number=line_number,
|
|
252
|
+
context_type="lines",
|
|
253
|
+
content="\n".join(snippet_lines),
|
|
254
|
+
language=language,
|
|
255
|
+
imports=imports or [],
|
|
256
|
+
)
|
|
257
|
+
|
|
258
|
+
def _get_primary_context(
|
|
259
|
+
self,
|
|
260
|
+
contexts: List[FileContext],
|
|
261
|
+
traceback: ParsedTraceback,
|
|
262
|
+
) -> FileContext:
|
|
263
|
+
"""Determine the primary error context.
|
|
264
|
+
|
|
265
|
+
Usually the last non-missing context in the traceback.
|
|
266
|
+
|
|
267
|
+
Args:
|
|
268
|
+
contexts: All file contexts
|
|
269
|
+
traceback: Parsed traceback
|
|
270
|
+
|
|
271
|
+
Returns:
|
|
272
|
+
The primary FileContext
|
|
273
|
+
"""
|
|
274
|
+
# Try to find context for the primary frame
|
|
275
|
+
if traceback.primary_frame:
|
|
276
|
+
for ctx in reversed(contexts):
|
|
277
|
+
if ctx.filepath.endswith(traceback.primary_frame.filepath) or \
|
|
278
|
+
traceback.primary_frame.filepath.endswith(ctx.filepath):
|
|
279
|
+
if ctx.context_type != "missing":
|
|
280
|
+
return ctx
|
|
281
|
+
|
|
282
|
+
# Fallback: last non-missing context
|
|
283
|
+
for ctx in reversed(contexts):
|
|
284
|
+
if ctx.context_type != "missing":
|
|
285
|
+
return ctx
|
|
286
|
+
|
|
287
|
+
# If all missing, return the last one
|
|
288
|
+
return contexts[-1]
|
|
289
|
+
|
|
290
|
+
def _build_upstream_context(
|
|
291
|
+
self,
|
|
292
|
+
primary_context: FileContext,
|
|
293
|
+
traceback_contexts: List[FileContext],
|
|
294
|
+
traceback: ParsedTraceback,
|
|
295
|
+
) -> Optional[UpstreamContext]:
|
|
296
|
+
"""Build upstream context for deep debugging.
|
|
297
|
+
|
|
298
|
+
Analyzes imports and dependencies to find potentially
|
|
299
|
+
relevant upstream code.
|
|
300
|
+
|
|
301
|
+
Args:
|
|
302
|
+
primary_context: The primary error context
|
|
303
|
+
traceback_contexts: All traceback contexts
|
|
304
|
+
traceback: Parsed traceback
|
|
305
|
+
|
|
306
|
+
Returns:
|
|
307
|
+
UpstreamContext or None if no upstream context found
|
|
308
|
+
"""
|
|
309
|
+
upstream_files = []
|
|
310
|
+
relevant_definitions = {}
|
|
311
|
+
|
|
312
|
+
# Get call chain
|
|
313
|
+
call_chain = self.call_chain_analyzer.analyze_from_contexts(
|
|
314
|
+
traceback_contexts, traceback
|
|
315
|
+
)
|
|
316
|
+
|
|
317
|
+
# Find upstream files from imports
|
|
318
|
+
for ctx in traceback_contexts:
|
|
319
|
+
for imp in ctx.imports:
|
|
320
|
+
if imp.resolved_path and imp.resolved_path not in upstream_files:
|
|
321
|
+
# Check if this import is already in traceback
|
|
322
|
+
is_in_traceback = any(
|
|
323
|
+
tc.filepath == imp.resolved_path
|
|
324
|
+
for tc in traceback_contexts
|
|
325
|
+
)
|
|
326
|
+
if not is_in_traceback:
|
|
327
|
+
upstream_files.append(imp.resolved_path)
|
|
328
|
+
|
|
329
|
+
# Get files that depend on the primary context
|
|
330
|
+
dependents = self.dependency_graph.get_dependents(primary_context.filepath)
|
|
331
|
+
for dep in dependents:
|
|
332
|
+
if dep not in upstream_files:
|
|
333
|
+
upstream_files.append(dep)
|
|
334
|
+
|
|
335
|
+
# Limit to max_upstream_files
|
|
336
|
+
upstream_files = upstream_files[:self.max_upstream_files]
|
|
337
|
+
|
|
338
|
+
# Extract contexts for upstream files
|
|
339
|
+
upstream_contexts = []
|
|
340
|
+
for filepath in upstream_files:
|
|
341
|
+
ctx = self._extract_single_context(
|
|
342
|
+
filepath,
|
|
343
|
+
1, # Just get the file overview
|
|
344
|
+
detect_language(filepath),
|
|
345
|
+
)
|
|
346
|
+
if ctx and ctx.context_type != "missing":
|
|
347
|
+
upstream_contexts.append(ctx)
|
|
348
|
+
|
|
349
|
+
if not upstream_contexts and not call_chain.sites:
|
|
350
|
+
return None
|
|
351
|
+
|
|
352
|
+
# Build dependency summary
|
|
353
|
+
summary = self.dependency_graph.get_summary()
|
|
354
|
+
|
|
355
|
+
return UpstreamContext(
|
|
356
|
+
file_contexts=upstream_contexts,
|
|
357
|
+
call_chain=call_chain.to_string_list(),
|
|
358
|
+
relevant_definitions=relevant_definitions,
|
|
359
|
+
dependency_summary=summary,
|
|
360
|
+
)
|
|
361
|
+
|
|
362
|
+
def _create_minimal_context(
|
|
363
|
+
self,
|
|
364
|
+
error_log: str,
|
|
365
|
+
traceback: ParsedTraceback,
|
|
366
|
+
) -> AnalysisContext:
|
|
367
|
+
"""Create context when no explicit traceback files are found.
|
|
368
|
+
|
|
369
|
+
Uses project scanning and error analysis to find relevant files.
|
|
370
|
+
|
|
371
|
+
Args:
|
|
372
|
+
error_log: Original error log
|
|
373
|
+
traceback: Parsed traceback
|
|
374
|
+
|
|
375
|
+
Returns:
|
|
376
|
+
AnalysisContext with project-aware context
|
|
377
|
+
"""
|
|
378
|
+
# Analyze the error to find relevant files
|
|
379
|
+
error_analysis = self.error_analyzer.analyze(error_log)
|
|
380
|
+
|
|
381
|
+
# Get relevant files from error analysis
|
|
382
|
+
relevant_files = error_analysis.relevant_files
|
|
383
|
+
|
|
384
|
+
# If we found relevant files, extract their contexts
|
|
385
|
+
file_contexts = []
|
|
386
|
+
primary_context = None
|
|
387
|
+
|
|
388
|
+
if relevant_files:
|
|
389
|
+
# Try to create contexts for relevant files
|
|
390
|
+
for pf in relevant_files[:3]: # Top 3 most relevant
|
|
391
|
+
full_path = str(self.project_root / pf.path)
|
|
392
|
+
ctx = self._extract_single_context(
|
|
393
|
+
full_path,
|
|
394
|
+
1, # Start of file
|
|
395
|
+
pf.language,
|
|
396
|
+
)
|
|
397
|
+
if ctx and ctx.context_type != "missing":
|
|
398
|
+
file_contexts.append(ctx)
|
|
399
|
+
if primary_context is None:
|
|
400
|
+
primary_context = ctx
|
|
401
|
+
|
|
402
|
+
# If still no context, use entry points
|
|
403
|
+
if not primary_context and self.project_info.entry_points:
|
|
404
|
+
for ep in self.project_info.entry_points[:2]:
|
|
405
|
+
full_path = str(self.project_root / ep.path)
|
|
406
|
+
ctx = self._extract_single_context(
|
|
407
|
+
full_path,
|
|
408
|
+
1,
|
|
409
|
+
ep.language,
|
|
410
|
+
)
|
|
411
|
+
if ctx and ctx.context_type != "missing":
|
|
412
|
+
file_contexts.append(ctx)
|
|
413
|
+
if primary_context is None:
|
|
414
|
+
primary_context = ctx
|
|
415
|
+
|
|
416
|
+
# Create synthetic primary context if still none found
|
|
417
|
+
if primary_context is None:
|
|
418
|
+
primary_context = FileContext(
|
|
419
|
+
filepath="<error_log>",
|
|
420
|
+
line_number=0,
|
|
421
|
+
context_type="error_analysis",
|
|
422
|
+
content=error_log,
|
|
423
|
+
language=error_analysis.suggested_language or traceback.language,
|
|
424
|
+
)
|
|
425
|
+
|
|
426
|
+
# Build upstream context with project info
|
|
427
|
+
upstream_context = None
|
|
428
|
+
if file_contexts:
|
|
429
|
+
upstream_context = UpstreamContext(
|
|
430
|
+
file_contexts=file_contexts[1:] if len(file_contexts) > 1 else [],
|
|
431
|
+
call_chain=[],
|
|
432
|
+
relevant_definitions={},
|
|
433
|
+
dependency_summary=self.project_info.to_summary(),
|
|
434
|
+
)
|
|
435
|
+
|
|
436
|
+
return AnalysisContext(
|
|
437
|
+
primary_context=primary_context,
|
|
438
|
+
traceback_contexts=file_contexts,
|
|
439
|
+
upstream_context=upstream_context,
|
|
440
|
+
parsed_traceback=traceback,
|
|
441
|
+
project_root=str(self.project_root),
|
|
442
|
+
error_analysis=error_analysis,
|
|
443
|
+
)
|
|
444
|
+
|
|
445
|
+
def _resolve_file_path(self, filepath: str) -> Optional[str]:
|
|
446
|
+
"""Resolve a file path to an actual file.
|
|
447
|
+
|
|
448
|
+
Args:
|
|
449
|
+
filepath: Path from traceback
|
|
450
|
+
|
|
451
|
+
Returns:
|
|
452
|
+
Resolved path or None
|
|
453
|
+
"""
|
|
454
|
+
# Try as-is
|
|
455
|
+
if os.path.isfile(filepath):
|
|
456
|
+
return filepath
|
|
457
|
+
|
|
458
|
+
# Try relative to project root
|
|
459
|
+
relative = self.project_root / filepath
|
|
460
|
+
if relative.is_file():
|
|
461
|
+
return str(relative)
|
|
462
|
+
|
|
463
|
+
# Try just the filename in project
|
|
464
|
+
filename = Path(filepath).name
|
|
465
|
+
for root, dirs, files in os.walk(self.project_root):
|
|
466
|
+
if filename in files:
|
|
467
|
+
return os.path.join(root, filename)
|
|
468
|
+
|
|
469
|
+
# Try common source directories
|
|
470
|
+
for src_dir in ['src', 'lib', 'app', 'pkg', '.']:
|
|
471
|
+
candidate = self.project_root / src_dir / filename
|
|
472
|
+
if candidate.is_file():
|
|
473
|
+
return str(candidate)
|
|
474
|
+
|
|
475
|
+
return None
|
|
476
|
+
|
|
477
|
+
def get_context_for_prompt(
|
|
478
|
+
self,
|
|
479
|
+
analysis_context: AnalysisContext,
|
|
480
|
+
include_upstream: bool = True,
|
|
481
|
+
include_project_info: bool = True,
|
|
482
|
+
) -> str:
|
|
483
|
+
"""Format analysis context for AI prompt.
|
|
484
|
+
|
|
485
|
+
Args:
|
|
486
|
+
analysis_context: The AnalysisContext to format
|
|
487
|
+
include_upstream: Whether to include upstream context
|
|
488
|
+
include_project_info: Whether to include project structure info
|
|
489
|
+
|
|
490
|
+
Returns:
|
|
491
|
+
Formatted context string
|
|
492
|
+
"""
|
|
493
|
+
parts = []
|
|
494
|
+
|
|
495
|
+
# Include project structure info for context
|
|
496
|
+
if include_project_info and self._project_info:
|
|
497
|
+
parts.append("## PROJECT INFORMATION")
|
|
498
|
+
parts.append(f"Type: {self._project_info.project_type}")
|
|
499
|
+
parts.append(f"Language: {self._project_info.primary_language.value}")
|
|
500
|
+
if self._project_info.frameworks_detected:
|
|
501
|
+
parts.append(f"Frameworks: {', '.join(self._project_info.frameworks_detected)}")
|
|
502
|
+
if self._project_info.entry_points:
|
|
503
|
+
parts.append(f"Entry Points: {', '.join(ep.path for ep in self._project_info.entry_points[:3])}")
|
|
504
|
+
parts.append("")
|
|
505
|
+
|
|
506
|
+
# Include error analysis if present
|
|
507
|
+
if analysis_context.error_analysis:
|
|
508
|
+
ea = analysis_context.error_analysis
|
|
509
|
+
parts.append("## ERROR ANALYSIS")
|
|
510
|
+
parts.append(f"Error Type: {ea.error_type}")
|
|
511
|
+
parts.append(f"Category: {ea.error_category}")
|
|
512
|
+
if ea.affected_routes:
|
|
513
|
+
parts.append(f"Affected Routes: {', '.join(ea.affected_routes)}")
|
|
514
|
+
if ea.relevant_files:
|
|
515
|
+
parts.append(f"Relevant Files: {', '.join(f.path for f in ea.relevant_files[:5])}")
|
|
516
|
+
parts.append("")
|
|
517
|
+
|
|
518
|
+
# Primary context
|
|
519
|
+
parts.append("## PRIMARY ERROR LOCATION")
|
|
520
|
+
if analysis_context.primary_context.filepath != "<error_log>":
|
|
521
|
+
parts.append(f"File: {analysis_context.primary_context.filepath}")
|
|
522
|
+
parts.append(f"Line: {analysis_context.primary_context.line_number}")
|
|
523
|
+
if analysis_context.primary_context.function_name:
|
|
524
|
+
parts.append(f"Function: {analysis_context.primary_context.function_name}")
|
|
525
|
+
parts.append(f"Language: {analysis_context.primary_context.language.value}")
|
|
526
|
+
parts.append("\n```")
|
|
527
|
+
parts.append(analysis_context.primary_context.content)
|
|
528
|
+
parts.append("```\n")
|
|
529
|
+
else:
|
|
530
|
+
# No specific file found - show error message
|
|
531
|
+
parts.append("(No specific file path in error)")
|
|
532
|
+
parts.append(f"Language: {analysis_context.primary_context.language.value}")
|
|
533
|
+
parts.append("\nError Message:")
|
|
534
|
+
parts.append(analysis_context.primary_context.content[:1000])
|
|
535
|
+
parts.append("")
|
|
536
|
+
|
|
537
|
+
# Other traceback locations
|
|
538
|
+
other_contexts = [
|
|
539
|
+
ctx for ctx in analysis_context.traceback_contexts
|
|
540
|
+
if ctx.filepath != analysis_context.primary_context.filepath
|
|
541
|
+
]
|
|
542
|
+
if other_contexts:
|
|
543
|
+
parts.append("## CALL STACK CONTEXT")
|
|
544
|
+
for ctx in other_contexts:
|
|
545
|
+
parts.append(f"\n### {ctx.filepath}:{ctx.line_number}")
|
|
546
|
+
if ctx.function_name:
|
|
547
|
+
parts.append(f"Function: {ctx.function_name}")
|
|
548
|
+
parts.append("```")
|
|
549
|
+
parts.append(ctx.content)
|
|
550
|
+
parts.append("```")
|
|
551
|
+
|
|
552
|
+
# Upstream context
|
|
553
|
+
if include_upstream and analysis_context.upstream_context:
|
|
554
|
+
parts.append("\n## UPSTREAM CONTEXT (for root cause analysis)")
|
|
555
|
+
parts.append(analysis_context.upstream_context.to_prompt_text())
|
|
556
|
+
|
|
557
|
+
return "\n".join(parts)
|
|
558
|
+
|
|
559
|
+
def get_deep_context(self, error_log: str, language_hint: Optional[Language] = None) -> str:
|
|
560
|
+
"""Get comprehensive context for an error with full project awareness.
|
|
561
|
+
|
|
562
|
+
This method provides THOROUGH context for the AI, including:
|
|
563
|
+
- Project structure and frameworks
|
|
564
|
+
- Error analysis
|
|
565
|
+
- FULL contents of relevant files (not truncated)
|
|
566
|
+
- Entry point contents
|
|
567
|
+
- Related file existence checks
|
|
568
|
+
|
|
569
|
+
Args:
|
|
570
|
+
error_log: The error message/log
|
|
571
|
+
language_hint: Optional language hint
|
|
572
|
+
|
|
573
|
+
Returns:
|
|
574
|
+
Comprehensive context string for AI
|
|
575
|
+
"""
|
|
576
|
+
# Build analysis context
|
|
577
|
+
analysis_ctx = self.build_analysis_context(error_log, language_hint=language_hint)
|
|
578
|
+
|
|
579
|
+
parts = []
|
|
580
|
+
|
|
581
|
+
# Project info
|
|
582
|
+
if self._project_info:
|
|
583
|
+
parts.append("## PROJECT INFORMATION")
|
|
584
|
+
parts.append(f"Type: {self._project_info.project_type}")
|
|
585
|
+
parts.append(f"Language: {self._project_info.primary_language.value}")
|
|
586
|
+
if self._project_info.frameworks_detected:
|
|
587
|
+
parts.append(f"Frameworks: {', '.join(self._project_info.frameworks_detected)}")
|
|
588
|
+
parts.append("")
|
|
589
|
+
|
|
590
|
+
# Error analysis
|
|
591
|
+
if analysis_ctx.error_analysis:
|
|
592
|
+
ea = analysis_ctx.error_analysis
|
|
593
|
+
parts.append("## ERROR ANALYSIS")
|
|
594
|
+
parts.append(f"Error Type: {ea.error_type}")
|
|
595
|
+
parts.append(f"Category: {ea.error_category}")
|
|
596
|
+
if ea.affected_routes:
|
|
597
|
+
parts.append(f"Affected Routes: {', '.join(ea.affected_routes)}")
|
|
598
|
+
parts.append("")
|
|
599
|
+
|
|
600
|
+
# Original error message
|
|
601
|
+
parts.append("## ORIGINAL ERROR")
|
|
602
|
+
parts.append("```")
|
|
603
|
+
parts.append(error_log)
|
|
604
|
+
parts.append("```")
|
|
605
|
+
parts.append("")
|
|
606
|
+
|
|
607
|
+
# Check for file paths mentioned in error
|
|
608
|
+
parts.append("## FILE EXISTENCE CHECK")
|
|
609
|
+
import re
|
|
610
|
+
file_paths = re.findall(r'[/\w\-\.]+\.(?:html|js|ts|py|css|json)', error_log)
|
|
611
|
+
for fp in file_paths[:5]:
|
|
612
|
+
full_path = self.project_root / fp.lstrip('/')
|
|
613
|
+
exists = full_path.exists()
|
|
614
|
+
parts.append(f"- {fp}: {'EXISTS' if exists else 'MISSING'}")
|
|
615
|
+
parts.append("")
|
|
616
|
+
|
|
617
|
+
# FULL contents of relevant files - this is the key fix
|
|
618
|
+
files_added = set()
|
|
619
|
+
parts.append("## SOURCE FILES TO ANALYZE AND FIX")
|
|
620
|
+
parts.append("(Read these files carefully before suggesting fixes)")
|
|
621
|
+
parts.append("")
|
|
622
|
+
|
|
623
|
+
# Add entry points with FULL content
|
|
624
|
+
for ep in self.project_info.entry_points[:3]:
|
|
625
|
+
if ep.path in files_added:
|
|
626
|
+
continue
|
|
627
|
+
content = self.project_scanner.get_file_content(ep.path)
|
|
628
|
+
if content:
|
|
629
|
+
files_added.add(ep.path)
|
|
630
|
+
parts.append(f"### FILE: {ep.path}")
|
|
631
|
+
parts.append(f"Language: {ep.language.value}")
|
|
632
|
+
parts.append(f"```{ep.language.value}")
|
|
633
|
+
parts.append(content) # FULL content, no truncation
|
|
634
|
+
parts.append("```")
|
|
635
|
+
parts.append("")
|
|
636
|
+
|
|
637
|
+
# Add relevant files from error analysis with FULL content
|
|
638
|
+
if analysis_ctx.error_analysis and analysis_ctx.error_analysis.relevant_files:
|
|
639
|
+
for rf in analysis_ctx.error_analysis.relevant_files[:5]:
|
|
640
|
+
if rf.path in files_added:
|
|
641
|
+
continue
|
|
642
|
+
content = self.project_scanner.get_file_content(rf.path)
|
|
643
|
+
if content:
|
|
644
|
+
files_added.add(rf.path)
|
|
645
|
+
parts.append(f"### FILE: {rf.path}")
|
|
646
|
+
parts.append(f"Language: {rf.language.value}")
|
|
647
|
+
parts.append(f"```{rf.language.value}")
|
|
648
|
+
parts.append(content) # FULL content
|
|
649
|
+
parts.append("```")
|
|
650
|
+
parts.append("")
|
|
651
|
+
|
|
652
|
+
# If error mentions specific directories, check their structure
|
|
653
|
+
if 'public' in error_log.lower() or 'static' in error_log.lower():
|
|
654
|
+
parts.append("## DIRECTORY STRUCTURE CHECK")
|
|
655
|
+
for dirname in ['public', 'static', 'build', 'dist']:
|
|
656
|
+
dir_path = self.project_root / dirname
|
|
657
|
+
if dir_path.exists():
|
|
658
|
+
files = list(dir_path.iterdir())[:10]
|
|
659
|
+
parts.append(f"- {dirname}/: {[f.name for f in files]}")
|
|
660
|
+
else:
|
|
661
|
+
parts.append(f"- {dirname}/: DOES NOT EXIST")
|
|
662
|
+
parts.append("")
|
|
663
|
+
|
|
664
|
+
# Instructions for AI
|
|
665
|
+
parts.append("## INSTRUCTIONS")
|
|
666
|
+
parts.append("1. Read the source files above CAREFULLY")
|
|
667
|
+
parts.append("2. Understand what the code is trying to do")
|
|
668
|
+
parts.append("3. Identify the root cause of the error")
|
|
669
|
+
parts.append("4. Provide a COMPLETE fix with the full corrected file content")
|
|
670
|
+
parts.append("5. The fix should be ready to save directly to the file")
|
|
671
|
+
|
|
672
|
+
return "\n".join(parts)
|