nc1709-1.15.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nc1709/__init__.py +13 -0
- nc1709/agent/__init__.py +36 -0
- nc1709/agent/core.py +505 -0
- nc1709/agent/mcp_bridge.py +245 -0
- nc1709/agent/permissions.py +298 -0
- nc1709/agent/tools/__init__.py +21 -0
- nc1709/agent/tools/base.py +440 -0
- nc1709/agent/tools/bash_tool.py +367 -0
- nc1709/agent/tools/file_tools.py +454 -0
- nc1709/agent/tools/notebook_tools.py +516 -0
- nc1709/agent/tools/search_tools.py +322 -0
- nc1709/agent/tools/task_tool.py +284 -0
- nc1709/agent/tools/web_tools.py +555 -0
- nc1709/agents/__init__.py +17 -0
- nc1709/agents/auto_fix.py +506 -0
- nc1709/agents/test_generator.py +507 -0
- nc1709/checkpoints.py +372 -0
- nc1709/cli.py +3380 -0
- nc1709/cli_ui.py +1080 -0
- nc1709/cognitive/__init__.py +149 -0
- nc1709/cognitive/anticipation.py +594 -0
- nc1709/cognitive/context_engine.py +1046 -0
- nc1709/cognitive/council.py +824 -0
- nc1709/cognitive/learning.py +761 -0
- nc1709/cognitive/router.py +583 -0
- nc1709/cognitive/system.py +519 -0
- nc1709/config.py +155 -0
- nc1709/custom_commands.py +300 -0
- nc1709/executor.py +333 -0
- nc1709/file_controller.py +354 -0
- nc1709/git_integration.py +308 -0
- nc1709/github_integration.py +477 -0
- nc1709/image_input.py +446 -0
- nc1709/linting.py +519 -0
- nc1709/llm_adapter.py +667 -0
- nc1709/logger.py +192 -0
- nc1709/mcp/__init__.py +18 -0
- nc1709/mcp/client.py +370 -0
- nc1709/mcp/manager.py +407 -0
- nc1709/mcp/protocol.py +210 -0
- nc1709/mcp/server.py +473 -0
- nc1709/memory/__init__.py +20 -0
- nc1709/memory/embeddings.py +325 -0
- nc1709/memory/indexer.py +474 -0
- nc1709/memory/sessions.py +432 -0
- nc1709/memory/vector_store.py +451 -0
- nc1709/models/__init__.py +86 -0
- nc1709/models/detector.py +377 -0
- nc1709/models/formats.py +315 -0
- nc1709/models/manager.py +438 -0
- nc1709/models/registry.py +497 -0
- nc1709/performance/__init__.py +343 -0
- nc1709/performance/cache.py +705 -0
- nc1709/performance/pipeline.py +611 -0
- nc1709/performance/tiering.py +543 -0
- nc1709/plan_mode.py +362 -0
- nc1709/plugins/__init__.py +17 -0
- nc1709/plugins/agents/__init__.py +18 -0
- nc1709/plugins/agents/django_agent.py +912 -0
- nc1709/plugins/agents/docker_agent.py +623 -0
- nc1709/plugins/agents/fastapi_agent.py +887 -0
- nc1709/plugins/agents/git_agent.py +731 -0
- nc1709/plugins/agents/nextjs_agent.py +867 -0
- nc1709/plugins/base.py +359 -0
- nc1709/plugins/manager.py +411 -0
- nc1709/plugins/registry.py +337 -0
- nc1709/progress.py +443 -0
- nc1709/prompts/__init__.py +22 -0
- nc1709/prompts/agent_system.py +180 -0
- nc1709/prompts/task_prompts.py +340 -0
- nc1709/prompts/unified_prompt.py +133 -0
- nc1709/reasoning_engine.py +541 -0
- nc1709/remote_client.py +266 -0
- nc1709/shell_completions.py +349 -0
- nc1709/slash_commands.py +649 -0
- nc1709/task_classifier.py +408 -0
- nc1709/version_check.py +177 -0
- nc1709/web/__init__.py +8 -0
- nc1709/web/server.py +950 -0
- nc1709/web/templates/index.html +1127 -0
- nc1709-1.15.4.dist-info/METADATA +858 -0
- nc1709-1.15.4.dist-info/RECORD +86 -0
- nc1709-1.15.4.dist-info/WHEEL +5 -0
- nc1709-1.15.4.dist-info/entry_points.txt +2 -0
- nc1709-1.15.4.dist-info/licenses/LICENSE +9 -0
- nc1709-1.15.4.dist-info/top_level.txt +1 -0
@@ -0,0 +1,506 @@
+"""
+Auto-Fix Agent
+Automatically detects and fixes code errors using LLM
+"""
+import os
+import re
+import subprocess
+from typing import List, Dict, Any, Optional, Tuple
+from dataclasses import dataclass
+from enum import Enum
+
+
+class ErrorType(Enum):
+    """Types of code errors"""
+    SYNTAX = "syntax"
+    TYPE = "type"
+    IMPORT = "import"
+    RUNTIME = "runtime"
+    LINT = "lint"
+    TEST = "test"
+    BUILD = "build"
+
+
+@dataclass
+class CodeError:
+    """Represents a code error"""
+    file_path: str
+    line_number: int
+    column: Optional[int]
+    message: str
+    error_type: ErrorType
+    code_snippet: Optional[str] = None
+    suggestion: Optional[str] = None
+
+
+@dataclass
+class Fix:
+    """Represents a fix for an error"""
+    file_path: str
+    original_code: str
+    fixed_code: str
+    description: str
+    confidence: float
+
+
+class AutoFixAgent:
+    """Agent that automatically detects and fixes code errors"""
+
+    def __init__(self, llm_adapter=None):
+        """Initialize the auto-fix agent
+
+        Args:
+            llm_adapter: LLMAdapter instance for generating fixes
+        """
+        self.llm = llm_adapter
+        self._error_parsers = {
+            "python": self._parse_python_errors,
+            "javascript": self._parse_js_errors,
+            "typescript": self._parse_ts_errors,
+        }
+
+    def analyze_file(self, file_path: str) -> List[CodeError]:
+        """Analyze a file for errors
+
+        Args:
+            file_path: Path to the file
+
+        Returns:
+            List of detected errors
+        """
+        if not os.path.exists(file_path):
+            return []
+
+        # Detect language
+        ext = os.path.splitext(file_path)[1].lower()
+        language = self._get_language(ext)
+
+        errors = []
+
+        # Run language-specific linters/checkers
+        if language == "python":
+            errors.extend(self._check_python(file_path))
+        elif language in ("javascript", "typescript"):
+            errors.extend(self._check_js_ts(file_path, language))
+
+        return errors
+
+    def analyze_output(self, output: str, language: str = "python") -> List[CodeError]:
+        """Analyze error output from a command
+
+        Args:
+            output: Error output text
+            language: Programming language
+
+        Returns:
+            List of detected errors
+        """
+        parser = self._error_parsers.get(language, self._parse_generic_errors)
+        return parser(output)
+
+    def fix_errors(
+        self,
+        errors: List[CodeError],
+        auto_apply: bool = False
+    ) -> List[Fix]:
+        """Generate fixes for errors
+
+        Args:
+            errors: List of errors to fix
+            auto_apply: Whether to automatically apply fixes
+
+        Returns:
+            List of generated fixes
+        """
+        if not self.llm:
+            raise RuntimeError("LLM adapter required for generating fixes")
+
+        fixes = []
+
+        for error in errors:
+            fix = self._generate_fix(error)
+            if fix:
+                fixes.append(fix)
+
+                if auto_apply:
+                    self._apply_fix(fix)
+
+        return fixes
+
+    def fix_file(
+        self,
+        file_path: str,
+        auto_apply: bool = False
+    ) -> Tuple[List[CodeError], List[Fix]]:
+        """Analyze and fix errors in a file
+
+        Args:
+            file_path: Path to the file
+            auto_apply: Whether to automatically apply fixes
+
+        Returns:
+            Tuple of (errors, fixes)
+        """
+        errors = self.analyze_file(file_path)
+        fixes = self.fix_errors(errors, auto_apply)
+        return errors, fixes
+
+    def _get_language(self, ext: str) -> str:
+        """Get language from file extension"""
+        mapping = {
+            ".py": "python",
+            ".js": "javascript",
+            ".jsx": "javascript",
+            ".ts": "typescript",
+            ".tsx": "typescript",
+            ".go": "go",
+            ".rs": "rust",
+            ".java": "java",
+            ".rb": "ruby",
+            ".php": "php",
+        }
+        return mapping.get(ext, "unknown")
+
+    def _check_python(self, file_path: str) -> List[CodeError]:
+        """Check Python file for errors"""
+        errors = []
+
+        # Check syntax with Python
+        try:
+            result = subprocess.run(
+                ["python", "-m", "py_compile", file_path],
+                capture_output=True,
+                text=True
+            )
+            if result.returncode != 0:
+                errors.extend(self._parse_python_errors(result.stderr))
+        except Exception:
+            pass
+
+        # Run flake8 if available
+        try:
+            result = subprocess.run(
+                ["flake8", "--max-line-length=100", file_path],
+                capture_output=True,
+                text=True
+            )
+            if result.stdout:
+                errors.extend(self._parse_flake8_output(result.stdout))
+        except FileNotFoundError:
+            pass
+
+        # Run mypy if available
+        try:
+            result = subprocess.run(
+                ["mypy", "--ignore-missing-imports", file_path],
+                capture_output=True,
+                text=True
+            )
+            if result.stdout:
+                errors.extend(self._parse_mypy_output(result.stdout))
+        except FileNotFoundError:
+            pass
+
+        return errors
+
+    def _check_js_ts(self, file_path: str, language: str) -> List[CodeError]:
+        """Check JavaScript/TypeScript file for errors"""
+        errors = []
+
+        # Run ESLint if available
+        try:
+            result = subprocess.run(
+                ["npx", "eslint", "--format", "json", file_path],
+                capture_output=True,
+                text=True
+            )
+            if result.stdout:
+                errors.extend(self._parse_eslint_output(result.stdout))
+        except FileNotFoundError:
+            pass
+
+        # Run TypeScript compiler for type checking
+        if language == "typescript":
+            try:
+                result = subprocess.run(
+                    ["npx", "tsc", "--noEmit", file_path],
+                    capture_output=True,
+                    text=True
+                )
+                if result.stdout or result.stderr:
+                    errors.extend(self._parse_ts_errors(result.stdout + result.stderr))
+            except FileNotFoundError:
+                pass
+
+        return errors
+
+    def _parse_python_errors(self, output: str) -> List[CodeError]:
+        """Parse Python error output"""
+        errors = []
+
+        # Syntax errors
+        syntax_pattern = r'File "([^"]+)", line (\d+)'
+        matches = re.finditer(syntax_pattern, output)
+
+        for match in matches:
+            file_path = match.group(1)
+            line_num = int(match.group(2))
+
+            # Extract error message
+            lines = output[match.end():].split('\n')
+            message = lines[0].strip() if lines else "Syntax error"
+
+            errors.append(CodeError(
+                file_path=file_path,
+                line_number=line_num,
+                column=None,
+                message=message,
+                error_type=ErrorType.SYNTAX
+            ))
+
+        return errors
+
+    def _parse_flake8_output(self, output: str) -> List[CodeError]:
+        """Parse flake8 output"""
+        errors = []
+
+        # Format: file:line:col: code message
+        pattern = r'([^:]+):(\d+):(\d+): ([A-Z]\d+) (.+)'
+        matches = re.finditer(pattern, output)
+
+        for match in matches:
+            errors.append(CodeError(
+                file_path=match.group(1),
+                line_number=int(match.group(2)),
+                column=int(match.group(3)),
+                message=f"{match.group(4)}: {match.group(5)}",
+                error_type=ErrorType.LINT
+            ))
+
+        return errors
+
+    def _parse_mypy_output(self, output: str) -> List[CodeError]:
+        """Parse mypy output"""
+        errors = []
+
+        # Format: file:line: error: message
+        pattern = r'([^:]+):(\d+): error: (.+)'
+        matches = re.finditer(pattern, output)
+
+        for match in matches:
+            errors.append(CodeError(
+                file_path=match.group(1),
+                line_number=int(match.group(2)),
+                column=None,
+                message=match.group(3),
+                error_type=ErrorType.TYPE
+            ))
+
+        return errors
+
+    def _parse_js_errors(self, output: str) -> List[CodeError]:
+        """Parse JavaScript error output"""
+        return self._parse_generic_errors(output)
+
+    def _parse_ts_errors(self, output: str) -> List[CodeError]:
+        """Parse TypeScript error output"""
+        errors = []
+
+        # Format: file(line,col): error TSxxxx: message
+        pattern = r'([^(]+)\((\d+),(\d+)\): error (TS\d+): (.+)'
+        matches = re.finditer(pattern, output)
+
+        for match in matches:
+            errors.append(CodeError(
+                file_path=match.group(1),
+                line_number=int(match.group(2)),
+                column=int(match.group(3)),
+                message=f"{match.group(4)}: {match.group(5)}",
+                error_type=ErrorType.TYPE
+            ))
+
+        return errors
+
+    def _parse_eslint_output(self, output: str) -> List[CodeError]:
+        """Parse ESLint JSON output"""
+        import json
+        errors = []
+
+        try:
+            data = json.loads(output)
+            for file_result in data:
+                file_path = file_result.get("filePath", "")
+                for msg in file_result.get("messages", []):
+                    error_type = ErrorType.LINT if msg.get("severity") == 1 else ErrorType.SYNTAX
+                    errors.append(CodeError(
+                        file_path=file_path,
+                        line_number=msg.get("line", 1),
+                        column=msg.get("column"),
+                        message=f"{msg.get('ruleId', 'error')}: {msg.get('message', '')}",
+                        error_type=error_type
+                    ))
+        except json.JSONDecodeError:
+            pass
+
+        return errors
+
+    def _parse_generic_errors(self, output: str) -> List[CodeError]:
+        """Generic error parser"""
+        errors = []
+
+        # Common patterns: file:line:message or file(line):message
+        patterns = [
+            r'([^:\s]+):(\d+):(?:\d+:)?\s*(.+)',
+            r'([^(\s]+)\((\d+)\):\s*(.+)',
+        ]
+
+        for pattern in patterns:
+            matches = re.finditer(pattern, output, re.MULTILINE)
+            for match in matches:
+                errors.append(CodeError(
+                    file_path=match.group(1),
+                    line_number=int(match.group(2)),
+                    column=None,
+                    message=match.group(3).strip(),
+                    error_type=ErrorType.RUNTIME
+                ))
+
+        return errors
+
+    def _generate_fix(self, error: CodeError) -> Optional[Fix]:
+        """Generate a fix for an error using LLM
+
+        Args:
+            error: The error to fix
+
+        Returns:
+            Fix object or None if cannot fix
+        """
+        if not os.path.exists(error.file_path):
+            return None
+
+        # Read the file
+        with open(error.file_path, 'r') as f:
+            content = f.read()
+        lines = content.split('\n')
+
+        # Get context around error
+        start_line = max(0, error.line_number - 5)
+        end_line = min(len(lines), error.line_number + 5)
+        context = '\n'.join(f"{i+1}: {lines[i]}" for i in range(start_line, end_line))
+
+        # Generate fix prompt
+        prompt = f"""Fix this code error:
+
+File: {error.file_path}
+Error at line {error.line_number}: {error.message}
+
+Code context:
+```
+{context}
+```
+
+Provide ONLY the corrected code snippet (the specific lines that need to change).
+Do not include line numbers or explanations, just the corrected code.
+"""
+
+        from ..llm_adapter import TaskType
+        response = self.llm.complete(prompt, task_type=TaskType.CODING, max_tokens=500)
+
+        # Extract fixed code from response
+        fixed_code = self._extract_code(response)
+        if not fixed_code:
+            return None
+
+        # Get original code around error line
+        original_code = lines[error.line_number - 1] if error.line_number <= len(lines) else ""
+
+        return Fix(
+            file_path=error.file_path,
+            original_code=original_code,
+            fixed_code=fixed_code,
+            description=f"Fix for: {error.message}",
+            confidence=0.8
+        )
+
+    def _extract_code(self, response: str) -> Optional[str]:
+        """Extract code from LLM response"""
+        # Try to extract from code blocks
+        code_match = re.search(r'```(?:\w+)?\n([\s\S]*?)```', response)
+        if code_match:
+            return code_match.group(1).strip()
+
+        # Return cleaned response
+        cleaned = response.strip()
+        if cleaned:
+            return cleaned
+
+        return None
+
+    def _apply_fix(self, fix: Fix) -> bool:
+        """Apply a fix to a file
+
+        Args:
+            fix: Fix to apply
+
+        Returns:
+            True if successful
+        """
+        try:
+            with open(fix.file_path, 'r') as f:
+                content = f.read()
+
+            # Replace original code with fixed code
+            if fix.original_code in content:
+                new_content = content.replace(fix.original_code, fix.fixed_code, 1)
+
+                with open(fix.file_path, 'w') as f:
+                    f.write(new_content)
+
+                return True
+
+        except Exception:
+            pass
+
+        return False
+
+
+def auto_fix_command(file_path: str, auto_apply: bool = False) -> str:
+    """Command-line interface for auto-fix
+
+    Args:
+        file_path: Path to file to fix
+        auto_apply: Whether to auto-apply fixes
+
+    Returns:
+        Summary of results
+    """
+    from ..llm_adapter import LLMAdapter
+
+    agent = AutoFixAgent(LLMAdapter())
+    errors, fixes = agent.fix_file(file_path, auto_apply)
+
+    output = []
+    output.append(f"\n{'='*60}")
+    output.append(f"Auto-Fix Analysis: {file_path}")
+    output.append(f"{'='*60}\n")
+
+    if not errors:
+        output.append("No errors found!")
+        return '\n'.join(output)
+
+    output.append(f"Found {len(errors)} error(s):\n")
+
+    for i, error in enumerate(errors, 1):
+        output.append(f"{i}. Line {error.line_number}: {error.message}")
+        output.append(f"   Type: {error.error_type.value}")
+
+    if fixes:
+        output.append(f"\nGenerated {len(fixes)} fix(es):\n")
+        for i, fix in enumerate(fixes, 1):
+            status = "Applied" if auto_apply else "Ready to apply"
+            output.append(f"{i}. {fix.description}")
+            output.append(f"   Status: {status}")
+            output.append(f"   Confidence: {fix.confidence*100:.0f}%")
+
+    return '\n'.join(output)
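
For orientation, a minimal usage sketch of the module above, assuming the package is installed and importable as nc1709 ("example.py" is a placeholder path). Error detection alone needs no LLM; generating or applying fixes requires passing an LLM adapter to the constructor, whose construction is package-specific and omitted here.

    from nc1709.agents.auto_fix import AutoFixAgent

    # Detect errors only: analyze_file runs whichever linters/checkers are installed.
    agent = AutoFixAgent()
    errors = agent.analyze_file("example.py")
    for err in errors:
        print(err.file_path, err.line_number, err.error_type.value, err.message)

    # With an LLM adapter (construction not shown), fixes can also be generated:
    # agent = AutoFixAgent(llm_adapter=adapter)
    # errors, fixes = agent.fix_file("example.py", auto_apply=False)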