auto-code-fixer 0.3.6-py3-none-any.whl → 0.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- auto_code_fixer/__init__.py +1 -1
- auto_code_fixer/approval.py +114 -0
- auto_code_fixer/cli.py +760 -60
- auto_code_fixer/reporting.py +61 -0
- auto_code_fixer/sandbox.py +35 -0
- auto_code_fixer/utils.py +42 -0
- {auto_code_fixer-0.3.6.dist-info → auto_code_fixer-0.4.0.dist-info}/METADATA +58 -6
- {auto_code_fixer-0.3.6.dist-info → auto_code_fixer-0.4.0.dist-info}/RECORD +12 -10
- {auto_code_fixer-0.3.6.dist-info → auto_code_fixer-0.4.0.dist-info}/WHEEL +0 -0
- {auto_code_fixer-0.3.6.dist-info → auto_code_fixer-0.4.0.dist-info}/entry_points.txt +0 -0
- {auto_code_fixer-0.3.6.dist-info → auto_code_fixer-0.4.0.dist-info}/licenses/LICENSE +0 -0
- {auto_code_fixer-0.3.6.dist-info → auto_code_fixer-0.4.0.dist-info}/top_level.txt +0 -0
auto_code_fixer/cli.py
CHANGED
@@ -9,11 +9,14 @@ from auto_code_fixer.runner import run_code
 from auto_code_fixer.fixer import fix_code_with_gpt
 from auto_code_fixer.installer import check_and_install_missing_lib
 from auto_code_fixer.utils import (
+    changed_py_files,
     discover_all_files,
     is_in_project,
     log,
     set_verbose,
+    snapshot_py_hashes,
 )
+from auto_code_fixer.reporting import FixReport, ReportAttempt, unified_diff
 from auto_code_fixer import __version__

 DEFAULT_MAX_RETRIES = 8 # can be overridden via CLI
@@ -35,9 +38,15 @@ def fix_file(
     max_files_changed: int,
     context_files: int,
     approve: bool,
+    max_diff_lines: int,
+    max_file_bytes: int,
+    max_total_bytes: int,
+    post_apply_check: bool,
     fmt: str | None,
     lint: str | None,
     lint_fix: bool,
+    ruff_first: bool,
+    report: FixReport | None,
 ) -> bool:
     log(f"Processing entry file: {file_path}")

@@ -67,12 +76,54 @@ def fix_file(

     venv_python = create_venv(sandbox_root)

+    def _run_ruff_first_pass() -> bool:
+        """Best-effort: run ruff auto-fix + import sorting + formatting before the LLM.
+
+        Returns True if it likely made changes.
+        """
+
+        from auto_code_fixer.command_runner import run_command
+
+        before = snapshot_py_hashes(sandbox_root)
+
+        # 1) Fix lint issues (including isort rules). --select I makes import sorting run
+        # even without an explicit project config.
+        rc1, _out1, err1 = run_command(
+            "python -m ruff check . --fix --select I",
+            timeout_s=max(timeout_s, 60),
+            python_exe=venv_python,
+            cwd=sandbox_root,
+            extra_env={"PYTHONPATH": sandbox_root},
+        )
+        if rc1 != 0:
+            if "No module named" in (err1 or "") and "ruff" in (err1 or ""):
+                log("ruff not installed in sandbox venv; skipping ruff-first", "DEBUG")
+                return False
+
+        # 2) Format (ruff formatter). Ignore failures.
+        run_command(
+            "python -m ruff format .",
+            timeout_s=max(timeout_s, 60),
+            python_exe=venv_python,
+            cwd=sandbox_root,
+            extra_env={"PYTHONPATH": sandbox_root},
+        )
+
+        after = snapshot_py_hashes(sandbox_root)
+        ch = changed_py_files(before, after)
+        if ch:
+            changed_sandbox_files.update({os.path.abspath(p) for p in ch})
+            return True
+        return False
+
     def _run_optional_formatters_and_linters() -> None:
         # Best-effort formatting/linting (only if tools are installed in the sandbox venv).
         from auto_code_fixer.command_runner import run_command

+        before = snapshot_py_hashes(sandbox_root)
+
         if fmt == "black":
-            rc,
+            rc, _out, err = run_command(
                 "python -m black .",
                 timeout_s=max(timeout_s, 60),
                 python_exe=venv_python,
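
The ruff-first pass above leans on two helpers that 0.4.0 adds to auto_code_fixer/utils.py, snapshot_py_hashes and changed_py_files, whose bodies are not part of this diff excerpt. A minimal sketch of what they plausibly do, inferred only from the call sites shown here (hash every .py file under a root, then report the paths that were added or whose contents changed):

# Hypothetical sketch -- the real utils.py implementation is not shown in this diff.
import hashlib
import os


def snapshot_py_hashes(root: str) -> dict[str, str]:
    """Map every .py file under `root` to a SHA-256 digest of its current contents."""
    hashes: dict[str, str] = {}
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if not name.endswith(".py"):
                continue
            path = os.path.join(dirpath, name)
            with open(path, "rb") as fh:
                hashes[path] = hashlib.sha256(fh.read()).hexdigest()
    return hashes


def changed_py_files(before: dict[str, str], after: dict[str, str]) -> list[str]:
    """Return paths that are new or whose digests differ between two snapshots."""
    return [p for p, digest in after.items() if before.get(p) != digest]

Snapshotting before and after the external tool runs is what lets the CLI attribute sandbox file changes to ruff/black rather than to the LLM.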
@@ -92,7 +143,7 @@ def fix_file(
             cmd = "python -m ruff check ."
             if lint_fix:
                 cmd += " --fix"
-            rc,
+            rc, _out, err = run_command(
                 cmd,
                 timeout_s=max(timeout_s, 60),
                 python_exe=venv_python,
@@ -107,11 +158,25 @@ def fix_file(
             else:
                 log(f"ruff reported issues (rc={rc}): {err}", "DEBUG")

+        after = snapshot_py_hashes(sandbox_root)
+        ch = changed_py_files(before, after)
+        if ch:
+            changed_sandbox_files.update({os.path.abspath(p) for p in ch})
+
     changed_sandbox_files: set[str] = set()

     for attempt in range(max_retries):
         log(f"Run attempt #{attempt + 1}")

+        changed_before = set(changed_sandbox_files)
+
+        attempt_report: ReportAttempt | None = None
+        if report is not None:
+            attempt_report = ReportAttempt(
+                index=attempt + 1,
+                run_cmd=run_cmd or f"python {os.path.relpath(sandbox_entry, sandbox_root)}",
+            )
+
         # Ensure local modules resolve inside sandbox
         if run_cmd:
             from auto_code_fixer.command_runner import run_command
@@ -132,6 +197,11 @@ def fix_file(
                 extra_env={"PYTHONPATH": sandbox_root},
             )

+        if attempt_report is not None:
+            attempt_report.return_code = int(retcode)
+            attempt_report.stdout = stdout
+            attempt_report.stderr = stderr
+
         if verbose:
             if stdout:
                 log(f"STDOUT:\n{stdout}", "DEBUG")
@@ -141,6 +211,11 @@ def fix_file(
         if retcode == 0:
             log("Script executed successfully ✅")

+            if attempt_report is not None and report is not None:
+                delta = sorted({os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)})
+                attempt_report.changed_files = delta
+                report.attempts.append(attempt_report)
+
             # Apply sandbox changes back to project (only if we actually changed something)
             if attempt > 0 and is_in_project(file_path, project_root) and changed_sandbox_files:
                 rel_changes = [os.path.relpath(p, sandbox_root) for p in sorted(changed_sandbox_files)]
@@ -163,6 +238,8 @@ def fix_file(
                 sr = os.path.realpath(os.path.abspath(sandbox_root))
                 pr = os.path.realpath(os.path.abspath(project_root))

+                backups: dict[str, str] = {}
+
                 for p in sorted(changed_sandbox_files):
                     p_real = os.path.realpath(os.path.abspath(p))

@@ -192,12 +269,62 @@ def fix_file(

                     if os.path.exists(dst_real):
                         bak = backup_file(dst_real)
+                        backups[dst_real] = bak
                         log(f"Backup created: {bak}", "DEBUG")

                     os.makedirs(os.path.dirname(dst_real), exist_ok=True)
+
+                    if report is not None:
+                        try:
+                            old_text = ""
+                            if os.path.exists(dst_real):
+                                old_text = open(dst_real, encoding="utf-8").read()
+                            new_text = open(p_real, encoding="utf-8").read()
+                            rel_report = os.path.relpath(dst_real, project_root)
+                            report.files_touched = sorted(set(report.files_touched + [rel_report]))
+                            report.diffs[rel_report] = unified_diff(
+                                old=old_text,
+                                new=new_text,
+                                fromfile=f"a/{rel_report}",
+                                tofile=f"b/{rel_report}",
+                            )
+                        except Exception:
+                            pass
+
                     shutil.copy(p_real, dst_real)
                     log(f"File updated: {dst_real}")

+                # Optional post-apply verification: run the same command against the real project files,
+                # using the sandbox venv for dependencies.
+                if post_apply_check and run_cmd and changed_sandbox_files:
+                    from auto_code_fixer.command_runner import run_command
+
+                    rc2, out2, err2 = run_command(
+                        run_cmd,
+                        timeout_s=timeout_s,
+                        python_exe=venv_python,
+                        cwd=project_root,
+                        extra_env={"PYTHONPATH": project_root},
+                    )
+                    if verbose:
+                        if out2:
+                            log(f"POST-APPLY STDOUT:\n{out2}", "DEBUG")
+                        if err2:
+                            log(f"POST-APPLY STDERR:\n{err2}", "DEBUG")
+
+                    if rc2 != 0:
+                        log(
+                            "Post-apply command failed; restoring backups (best-effort).",
+                            "ERROR",
+                        )
+                        for dst_real, bak in backups.items():
+                            try:
+                                shutil.copy(bak, dst_real)
+                            except Exception as e:
+                                log(f"WARN: failed to restore {dst_real} from {bak}: {e}")
+                        cleanup_sandbox()
+                        return False
+
             cleanup_sandbox()
             log(f"Fix completed in {attempt + 1} attempt(s) 🎉")
             return True
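
The report capture in this hunk calls unified_diff(old=..., new=..., fromfile=..., tofile=...) from the new auto_code_fixer/reporting.py module (+61 lines, not included in this excerpt). A minimal difflib-based sketch that matches the keyword arguments used at these call sites; the real module may differ:

# Hypothetical sketch -- reporting.py itself is not shown in this diff.
import difflib


def unified_diff(*, old: str, new: str, fromfile: str, tofile: str) -> str:
    """Render a unified diff between two file snapshots as a single string."""
    lines = difflib.unified_diff(
        old.splitlines(keepends=True),
        new.splitlines(keepends=True),
        fromfile=fromfile,
        tofile=tofile,
    )
    return "".join(lines)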
@@ -208,8 +335,55 @@ def fix_file(
         if check_and_install_missing_lib(stderr, python_exe=venv_python, project_root=sandbox_root):
             log("Missing dependency installed (venv), retrying…")
             time.sleep(1)
+            if attempt_report is not None and report is not None:
+                delta = sorted({os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)})
+                attempt_report.changed_files = delta
+                report.attempts.append(attempt_report)
             continue

+        # Optional: ruff-first pass before invoking the LLM.
+        if ruff_first:
+            changed = _run_ruff_first_pass()
+            if attempt_report is not None:
+                attempt_report.ruff_first_applied = bool(changed)
+
+            if changed:
+                # Re-run command after ruff fixes.
+                if run_cmd:
+                    from auto_code_fixer.command_runner import run_command
+
+                    rc_r, out_r, err_r = run_command(
+                        run_cmd,
+                        timeout_s=timeout_s,
+                        python_exe=venv_python,
+                        cwd=sandbox_root,
+                        extra_env={"PYTHONPATH": sandbox_root},
+                    )
+                else:
+                    rc_r, out_r, err_r = run_code(
+                        sandbox_entry,
+                        timeout_s=timeout_s,
+                        python_exe=venv_python,
+                        cwd=sandbox_root,
+                        extra_env={"PYTHONPATH": sandbox_root},
+                    )
+
+                if rc_r == 0:
+                    log("Ruff-first fixed the issue; continuing", "DEBUG")
+                    if attempt_report is not None and report is not None:
+                        attempt_report.return_code = int(rc_r)
+                        attempt_report.stdout = out_r
+                        attempt_report.stderr = err_r
+                        delta = sorted({os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)})
+                        attempt_report.changed_files = delta
+                        report.attempts.append(attempt_report)
+                    continue
+                else:
+                    # Use the updated stderr/stdout for the LLM selection.
+                    stdout = out_r
+                    stderr = err_r
+                    retcode = rc_r
+
         # Pick the most relevant local file from the traceback (entry or imported file)
         from auto_code_fixer.traceback_utils import pick_relevant_file

@@ -244,6 +418,12 @@ def fix_file(
        except Exception:
            pass

+        if attempt_report is not None:
+            try:
+                attempt_report.selected_file = os.path.relpath(target_file, sandbox_root)
+            except Exception:
+                attempt_report.selected_file = target_file
+
        log(f"Sending {os.path.relpath(target_file, sandbox_root)} + error to GPT 🧠", "DEBUG")

        applied_any = False
@@ -302,7 +482,14 @@ def fix_file(
                 # Prepare diffs / approvals and apply atomically.
                 sha_by_rel = {pf.path: pf.sha256 for pf in patch_files}

-
+                from auto_code_fixer.approval import (
+                    PlannedChange,
+                    UserAbort,
+                    guard_planned_changes,
+                    prompt_approve_file_by_file,
+                )
+
+                planned: list[PlannedChange] = []
                 for abs_path, new_content in resolved:
                     old = ""
                     if os.path.exists(abs_path):
@@ -310,7 +497,14 @@ def fix_file(
                     if new_content.strip() == (old or "").strip():
                         continue
                     rel = os.path.relpath(abs_path, sandbox_root)
-                    planned.append(
+                    planned.append(
+                        PlannedChange(
+                            abs_path=abs_path,
+                            rel_path=rel,
+                            old_content=old,
+                            new_content=new_content,
+                        )
+                    )

                 if planned:
                     if len(planned) > max_files_changed:
@@ -318,32 +512,28 @@ def fix_file(
                             f"Patch would change {len(planned)} files, exceeding --max-files-changed={max_files_changed}"
                         )

-
-
-
-
-
-
-                            (new or "").splitlines(keepends=True),
-                            fromfile=rel + " (before)",
-                            tofile=rel + " (after)",
-                        )
-                    )
-                    print(diff)
+                    guard_planned_changes(
+                        planned,
+                        max_file_bytes=max_file_bytes,
+                        max_total_bytes=max_total_bytes,
+                        max_diff_lines=max_diff_lines,
+                    )

-
-
-
+                    if approve:
+                        try:
+                            planned = prompt_approve_file_by_file(planned)
+                        except UserAbort:
+                            log("User aborted patch application", "WARN")
                             planned = []

-                    for
-                        expected_sha = sha_by_rel.get(
+                    for ch in planned:
+                        expected_sha = sha_by_rel.get(ch.rel_path)
                         if not expected_sha:
                             # Shouldn't happen; keep safe.
-                            raise ValueError(f"Missing sha256 for {
+                            raise ValueError(f"Missing sha256 for {ch.rel_path} in patch protocol payload")

-                        atomic_write_verified_sha256(abs_path, new_content, expected_sha)
-                        changed_sandbox_files.add(os.path.abspath(abs_path))
+                        atomic_write_verified_sha256(ch.abs_path, ch.new_content, expected_sha)
+                        changed_sandbox_files.add(os.path.abspath(ch.abs_path))
                         applied_any = True

             except Exception as e:
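
This hunk (and the legacy full-text path in the next one) imports PlannedChange, guard_planned_changes, prompt_approve_file_by_file, and UserAbort from the new auto_code_fixer/approval.py (+114 lines, not included in this excerpt). A rough sketch of the data model and the size guard, with field names and keyword arguments taken from the call sites above and everything else assumed; the interactive prompt itself is omitted:

# Hypothetical sketch -- approval.py is not shown in this diff.
import difflib
from dataclasses import dataclass


class UserAbort(Exception):
    """Raised when the user rejects the patch during interactive approval."""


@dataclass
class PlannedChange:
    abs_path: str
    rel_path: str
    old_content: str
    new_content: str


def guard_planned_changes(
    planned: list[PlannedChange],
    *,
    max_file_bytes: int,
    max_total_bytes: int,
    max_diff_lines: int,
) -> None:
    """Reject oversized patches before anything is written to disk."""
    total = 0
    for ch in planned:
        size = len(ch.new_content.encode("utf-8"))
        total += size
        if size > max_file_bytes:
            raise ValueError(f"{ch.rel_path}: new content exceeds {max_file_bytes} bytes")
        n_diff_lines = sum(
            1
            for _ in difflib.unified_diff(
                ch.old_content.splitlines(), ch.new_content.splitlines()
            )
        )
        if n_diff_lines > max_diff_lines:
            raise ValueError(f"{ch.rel_path}: diff exceeds {max_diff_lines} lines")
    if total > max_total_bytes:
        raise ValueError(f"patch exceeds {max_total_bytes} total bytes")

Running the guard before any write means an oversized or runaway patch is rejected in the sandbox rather than after it has touched files.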
@@ -361,29 +551,48 @@ def fix_file(
                log("GPT returned no changes. Stopping.", "WARN")
                break

-
-
+            from auto_code_fixer.approval import (
+                PlannedChange,
+                UserAbort,
+                guard_planned_changes,
+                prompt_approve_file_by_file,
+            )

-
-
-
-
-
-
-
-
+            old = open(target_file, encoding="utf-8").read()
+            rel = os.path.relpath(target_file, sandbox_root)
+
+            planned = [
+                PlannedChange(
+                    abs_path=target_file,
+                    rel_path=rel,
+                    old_content=old,
+                    new_content=fixed_code,
                )
-
-
-
-
+            ]
+
+            guard_planned_changes(
+                planned,
+                max_file_bytes=max_file_bytes,
+                max_total_bytes=max_total_bytes,
+                max_diff_lines=max_diff_lines,
+            )
+
+            if approve:
+                try:
+                    planned = prompt_approve_file_by_file(planned)
+                except UserAbort:
                    log("User declined patch application", "WARN")
                    cleanup_sandbox()
                    return False

+            if not planned:
+                log("No legacy changes approved", "WARN")
+                cleanup_sandbox()
+                return False
+
            from auto_code_fixer.patcher import safe_write

-            safe_write(target_file,
+            safe_write(target_file, planned[0].new_content)
            changed_sandbox_files.add(os.path.abspath(target_file))
            applied_any = True

@@ -392,11 +601,379 @@ def fix_file(
             log("Code updated by GPT ✏️")
             time.sleep(1)

+        if attempt_report is not None and report is not None:
+            delta = sorted({os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)})
+            attempt_report.changed_files = delta
+            report.attempts.append(attempt_report)
+
     log("Failed to auto-fix file after max retries ❌", "ERROR")
     cleanup_sandbox()
     return False


+def fixpack(
+    target_path: str,
+    project_root: str,
+    api_key: str | None,
+    ask: bool,
+    verbose: bool,
+    *,
+    dry_run: bool,
+    model: str | None,
+    timeout_s: int,
+    max_retries: int,
+    run_cmd: str | None,
+    patch_protocol: bool,
+    max_files_changed: int,
+    context_files: int,
+    approve: bool,
+    max_diff_lines: int,
+    max_file_bytes: int,
+    max_total_bytes: int,
+    post_apply_check: bool,
+    ruff_first: bool,
+    report: FixReport | None,
+) -> bool:
+    """Run a command (pytest by default) in a full-project sandbox and iteratively fix failures."""
+
+    from auto_code_fixer.sandbox import make_sandbox_project
+    from auto_code_fixer.venv_manager import create_venv
+
+    project_root = os.path.abspath(project_root)
+    target_path = os.path.abspath(target_path)
+
+    sandbox_root = make_sandbox_project(project_root=project_root)
+    if report is not None:
+        report.sandbox_root = sandbox_root
+
+    import atexit
+
+    def cleanup_sandbox() -> None:
+        try:
+            shutil.rmtree(sandbox_root)
+        except FileNotFoundError:
+            return
+        except Exception as e:
+            log(f"WARN: failed to delete sandbox dir {sandbox_root}: {e}")
+
+    atexit.register(cleanup_sandbox)
+
+    venv_python = create_venv(sandbox_root)
+    changed_sandbox_files: set[str] = set()
+
+    from auto_code_fixer.command_runner import run_command
+
+    effective_cmd = run_cmd or "pytest -q"
+
+    for attempt in range(max_retries):
+        log(f"Fixpack attempt #{attempt + 1}: running `{effective_cmd}`")
+
+        changed_before = set(changed_sandbox_files)
+
+        attempt_report: ReportAttempt | None = None
+        if report is not None:
+            attempt_report = ReportAttempt(index=attempt + 1, run_cmd=effective_cmd)
+
+        rc, out, err = run_command(
+            effective_cmd,
+            timeout_s=timeout_s,
+            python_exe=venv_python,
+            cwd=sandbox_root,
+            extra_env={"PYTHONPATH": sandbox_root},
+        )
+
+        combined = (out or "") + "\n" + (err or "")
+
+        if attempt_report is not None:
+            attempt_report.return_code = int(rc)
+            attempt_report.stdout = out
+            attempt_report.stderr = err
+
+        if verbose:
+            if out:
+                log(f"STDOUT:\n{out}", "DEBUG")
+            if err:
+                log(f"STDERR:\n{err}", "DEBUG")
+
+        if rc == 0:
+            log("Fixpack command succeeded ✅")
+
+            if attempt_report is not None and report is not None:
+                attempt_report.changed_files = sorted(
+                    {os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)}
+                )
+                report.attempts.append(attempt_report)
+
+            # Apply all changed sandbox files back to the real project.
+            if changed_sandbox_files:
+                rel_changes = [os.path.relpath(p, sandbox_root) for p in sorted(changed_sandbox_files)]
+
+                if ask:
+                    confirm = input(
+                        "Overwrite original files with fixed versions?\n"
+                        + "\n".join(f"- {c}" for c in rel_changes)
+                        + "\n(y/n): "
+                    ).strip().lower()
+                    if confirm != "y":
+                        log("User declined overwrite", "WARN")
+                        cleanup_sandbox()
+                        return False
+
+                if dry_run:
+                    log("DRY RUN: would apply fixes:\n" + "\n".join(rel_changes), "WARN")
+                else:
+                    # Compute diffs for report before copying.
+                    for p in sorted(changed_sandbox_files):
+                        rel = os.path.relpath(p, sandbox_root)
+                        dst = os.path.join(project_root, rel)
+                        if report is not None:
+                            try:
+                                old_text = ""
+                                if os.path.exists(dst):
+                                    old_text = open(dst, encoding="utf-8").read()
+                                new_text = open(p, encoding="utf-8").read()
+                                report.files_touched = sorted(set(report.files_touched + [rel]))
+                                report.diffs[rel] = unified_diff(
+                                    old=old_text,
+                                    new=new_text,
+                                    fromfile=f"a/{rel}",
+                                    tofile=f"b/{rel}",
+                                )
+                            except Exception:
+                                pass
+
+                        os.makedirs(os.path.dirname(dst), exist_ok=True)
+                        shutil.copy(p, dst)
+                        log(f"File updated: {dst}")
+
+            if post_apply_check:
+                rc2, out2, err2 = run_command(
+                    effective_cmd,
+                    timeout_s=timeout_s,
+                    python_exe=venv_python,
+                    cwd=project_root,
+                    extra_env={"PYTHONPATH": project_root},
+                )
+                if verbose:
+                    if out2:
+                        log(f"POST-APPLY STDOUT:\n{out2}", "DEBUG")
+                    if err2:
+                        log(f"POST-APPLY STDERR:\n{err2}", "DEBUG")
+                if rc2 != 0:
+                    log("Post-apply command failed", "ERROR")
+                    cleanup_sandbox()
+                    return False
+
+            cleanup_sandbox()
+            return True
+
+        log("Fixpack command failed ❌", "ERROR")
+        print(combined)
+
+        if check_and_install_missing_lib(combined, python_exe=venv_python, project_root=sandbox_root):
+            log("Missing dependency installed (venv), retrying…")
+            time.sleep(1)
+            if attempt_report is not None and report is not None:
+                attempt_report.changed_files = sorted(
+                    {os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)}
+                )
+                report.attempts.append(attempt_report)
+            continue
+
+        if ruff_first:
+            before = snapshot_py_hashes(sandbox_root)
+            rc_r1, _o1, _e1 = run_command(
+                "python -m ruff check . --fix --select I",
+                timeout_s=max(timeout_s, 60),
+                python_exe=venv_python,
+                cwd=sandbox_root,
+                extra_env={"PYTHONPATH": sandbox_root},
+            )
+            run_command(
+                "python -m ruff format .",
+                timeout_s=max(timeout_s, 60),
+                python_exe=venv_python,
+                cwd=sandbox_root,
+                extra_env={"PYTHONPATH": sandbox_root},
+            )
+            after = snapshot_py_hashes(sandbox_root)
+            ch = changed_py_files(before, after)
+            if ch:
+                changed_sandbox_files.update(ch)
+                if attempt_report is not None:
+                    attempt_report.ruff_first_applied = True
+
+            # Re-run after ruff
+            rc3, out3, err3 = run_command(
+                effective_cmd,
+                timeout_s=timeout_s,
+                python_exe=venv_python,
+                cwd=sandbox_root,
+                extra_env={"PYTHONPATH": sandbox_root},
+            )
+            if rc3 == 0:
+                if attempt_report is not None:
+                    attempt_report.return_code = int(rc3)
+                    attempt_report.stdout = out3
+                    attempt_report.stderr = err3
+                if attempt_report is not None and report is not None:
+                    attempt_report.changed_files = sorted(
+                        {os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)}
+                    )
+                    report.attempts.append(attempt_report)
+                continue
+            combined = (out3 or "") + "\n" + (err3 or "")
+
+        from auto_code_fixer.traceback_utils import pick_relevant_file
+
+        target_file = pick_relevant_file(combined, sandbox_root=sandbox_root)
+        if not target_file:
+            log("Could not determine failing file from output; stopping.", "ERROR")
+            cleanup_sandbox()
+            return False
+
+        # Ensure inside sandbox
+        sr = os.path.realpath(os.path.abspath(sandbox_root))
+        tf = os.path.realpath(os.path.abspath(target_file))
+        if not (tf.startswith(sr + os.sep) or tf == sr):
+            log("Suspicious target file path outside sandbox; stopping.", "ERROR")
+            cleanup_sandbox()
+            return False
+
+        if attempt_report is not None:
+            attempt_report.selected_file = os.path.relpath(tf, sandbox_root)
+
+        applied_any = False
+
+        if patch_protocol:
+            try:
+                from auto_code_fixer.fixer import fix_code_with_gpt_patch_protocol
+                from auto_code_fixer.patch_protocol import (
+                    parse_patch_protocol_response,
+                    validate_and_resolve_patch_files,
+                )
+                from auto_code_fixer.patcher import safe_read, atomic_write_verified_sha256
+                from auto_code_fixer.utils import find_imports
+                from auto_code_fixer.approval import (
+                    PlannedChange,
+                    UserAbort,
+                    guard_planned_changes,
+                    prompt_approve_file_by_file,
+                )
+
+                hint_paths = [os.path.relpath(tf, sandbox_root)]
+
+                ctx_pairs: list[tuple[str, str]] = []
+                if context_files and context_files > 0:
+                    rels: list[str] = []
+                    for abs_p in find_imports(tf, sandbox_root):
+                        try:
+                            rels.append(os.path.relpath(abs_p, sandbox_root))
+                        except Exception:
+                            continue
+                    rels = [r for r in rels if r not in hint_paths]
+                    for rel in rels[:context_files]:
+                        abs_p = os.path.join(sandbox_root, rel)
+                        if os.path.exists(abs_p):
+                            try:
+                                ctx_pairs.append((rel, safe_read(abs_p)))
+                            except Exception:
+                                pass
+
+                raw = fix_code_with_gpt_patch_protocol(
+                    sandbox_root=sandbox_root,
+                    error_log=combined,
+                    api_key=api_key,
+                    model=model,
+                    hint_paths=hint_paths,
+                    context_files=ctx_pairs,
+                )
+
+                patch_files = parse_patch_protocol_response(raw)
+                if len(patch_files) > max_files_changed:
+                    raise ValueError(
+                        f"Patch wants to change {len(patch_files)} files, exceeding --max-files-changed={max_files_changed}"
+                    )
+
+                resolved = validate_and_resolve_patch_files(patch_files, sandbox_root=sandbox_root)
+                sha_by_rel = {pf.path: pf.sha256 for pf in patch_files}
+
+                planned: list[PlannedChange] = []
+                for abs_path, new_content in resolved:
+                    old = ""
+                    if os.path.exists(abs_path):
+                        old = safe_read(abs_path)
+                    if new_content.strip() == (old or "").strip():
+                        continue
+                    rel = os.path.relpath(abs_path, sandbox_root)
+                    planned.append(
+                        PlannedChange(
+                            abs_path=abs_path,
+                            rel_path=rel,
+                            old_content=old,
+                            new_content=new_content,
+                        )
+                    )
+
+                if planned:
+                    guard_planned_changes(
+                        planned,
+                        max_file_bytes=max_file_bytes,
+                        max_total_bytes=max_total_bytes,
+                        max_diff_lines=max_diff_lines,
+                    )
+
+                    if approve:
+                        try:
+                            planned = prompt_approve_file_by_file(planned)
+                        except UserAbort:
+                            planned = []
+
+                for chg in planned:
+                    expected_sha = sha_by_rel.get(chg.rel_path)
+                    if not expected_sha:
+                        raise ValueError(f"Missing sha256 for {chg.rel_path} in patch protocol payload")
+                    atomic_write_verified_sha256(chg.abs_path, chg.new_content, expected_sha)
+                    changed_sandbox_files.add(os.path.abspath(chg.abs_path))
+                    applied_any = True
+
+            except Exception as e:
+                log(f"Patch protocol failed ({e}); falling back to full-text mode", "WARN")
+
+        if not applied_any:
+            fixed_code = fix_code_with_gpt(
+                original_code=open(tf, encoding="utf-8").read(),
+                error_log=combined,
+                api_key=api_key,
+                model=model,
+            )
+
+            from auto_code_fixer.patcher import safe_write
+
+            if fixed_code.strip() == open(tf, encoding="utf-8").read().strip():
+                log("GPT returned no changes. Stopping.", "WARN")
+                cleanup_sandbox()
+                return False
+
+            safe_write(tf, fixed_code)
+            changed_sandbox_files.add(os.path.abspath(tf))
+            applied_any = True
+
+        if applied_any:
+            log("Code updated by GPT ✏️")
+            time.sleep(1)
+
+        if attempt_report is not None and report is not None:
+            attempt_report.changed_files = sorted(
+                {os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)}
+            )
+            report.attempts.append(attempt_report)
+
+    log("Failed to fixpack after max retries ❌", "ERROR")
+    cleanup_sandbox()
+    return False
+
+
 def main():
     parser = argparse.ArgumentParser(
         description="Auto-fix Python code using OpenAI (advanced sandbox + retry loop)"
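
The hunk above contains the complete signature of the new fixpack() entry point, so it can also be driven programmatically. A hedged usage sketch; every concrete value below is illustrative only (normally main() fills these in from the CLI flags, and the guard numbers simply mirror the argparse defaults added later in this diff):

# Illustrative call only; main() normally wires these values from argparse.
from auto_code_fixer.cli import fixpack

ok = fixpack(
    "src/mypackage",   # target_path: folder/package to fix (hypothetical path)
    ".",               # project_root
    None,              # api_key (rely on environment configuration)
    True,              # ask: confirm before overwriting originals
    False,             # verbose
    dry_run=False,
    model=None,
    timeout_s=120,
    max_retries=8,
    run_cmd=None,      # fixpack falls back to "pytest -q"
    patch_protocol=True,
    max_files_changed=5,
    context_files=2,
    approve=False,
    max_diff_lines=4000,
    max_file_bytes=200_000,
    max_total_bytes=1_000_000,
    post_apply_check=False,
    ruff_first=True,
    report=None,
)
print("fixed" if ok else "not fixed")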
@@ -411,7 +988,7 @@ def main():
     parser.add_argument(
         "entry_file",
         nargs="?",
-        help="Path to the main Python file",
+        help="Path to the main Python file (or folder/package path if --fixpack)",
     )

     parser.add_argument("--project-root", default=".")
@@ -428,6 +1005,35 @@ def main():
             "If set, it runs inside the sandbox venv."
         ),
     )
+
+    parser.add_argument(
+        "--fixpack",
+        action="store_true",
+        help=(
+            "Fixpack mode: treat entry_file as a folder/package path, create a full-project sandbox, "
+            "run a command (default: pytest -q), and iteratively fix failing files until green or budget."
+        ),
+    )
+
+    parser.add_argument(
+        "--ruff-first",
+        action="store_true",
+        help=(
+            "Before calling the LLM, try best-effort Ruff auto-fixes (including import sorting) and Ruff formatting."
+        ),
+    )
+
+    parser.add_argument(
+        "--explain",
+        action="store_true",
+        help="Write a JSON report (report.json by default) with attempts, diffs, and final status.",
+    )
+
+    parser.add_argument(
+        "--report",
+        default=None,
+        help="Path to write the JSON report (implies --explain). Default: ./report.json",
+    )
     parser.add_argument(
         "--ai-plan",
         action="store_true",
@@ -463,7 +1069,44 @@ def main():
     parser.add_argument(
         "--approve",
         action="store_true",
-        help=
+        help=(
+            "Show diffs for proposed changes and ask for approval before applying them in the sandbox. "
+            "In patch-protocol mode, approvals are file-by-file."
+        ),
+    )
+
+    parser.add_argument(
+        "--max-diff-lines",
+        type=int,
+        default=4000,
+        help="Safety guard: maximum unified-diff lines to display/apply per file (default: 4000)",
+    )
+    parser.add_argument(
+        "--max-file-bytes",
+        type=int,
+        default=200_000,
+        help="Safety guard: maximum size (bytes) of proposed new content per file (default: 200000)",
+    )
+    parser.add_argument(
+        "--max-total-bytes",
+        type=int,
+        default=1_000_000,
+        help="Safety guard: maximum total bytes of proposed new content across all files (default: 1000000)",
+    )
+
+    post_apply_group = parser.add_mutually_exclusive_group()
+    post_apply_group.add_argument(
+        "--post-apply-check",
+        action="store_true",
+        help=(
+            "If using --run, re-run the command against the real project files after applying fixes "
+            "(uses sandbox venv for deps)."
+        ),
+    )
+    post_apply_group.add_argument(
+        "--no-post-apply-check",
+        action="store_true",
+        help="Disable the post-apply re-run check (even if --run is set).",
     )

     parser.add_argument(
@@ -527,25 +1170,82 @@ def main():
     if args.ai_plan:
         os.environ["AUTO_CODE_FIXER_AI_PLAN"] = "1"

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if args.no_post_apply_check:
+        post_apply_check = False
+    elif args.post_apply_check:
+        post_apply_check = True
+    else:
+        post_apply_check = bool(args.run)
+
+    report_path = args.report or ("report.json" if args.explain else None)
+    report: FixReport | None = None
+    if report_path is not None:
+        report = FixReport(
+            version=__version__,
+            mode="fixpack" if args.fixpack else "file",
+            project_root=os.path.abspath(args.project_root),
+            target=os.path.abspath(args.entry_file),
+            run_cmd=args.run or ("pytest -q" if args.fixpack else None),
+        )
+
+    try:
+        if args.fixpack:
+            ok = fixpack(
+                args.entry_file,
+                args.project_root,
+                args.api_key,
+                ask,
+                verbose,
+                dry_run=args.dry_run,
+                model=args.model,
+                timeout_s=args.timeout,
+                max_retries=args.max_retries,
+                run_cmd=args.run,
+                patch_protocol=not args.legacy_mode,
+                max_files_changed=args.max_files_changed,
+                context_files=args.context_files,
+                approve=args.approve,
+                max_diff_lines=args.max_diff_lines,
+                max_file_bytes=args.max_file_bytes,
+                max_total_bytes=args.max_total_bytes,
+                post_apply_check=post_apply_check,
+                ruff_first=args.ruff_first,
+                report=report,
+            )
+        else:
+            ok = fix_file(
+                args.entry_file,
+                args.project_root,
+                args.api_key,
+                ask,
+                verbose,
+                dry_run=args.dry_run,
+                model=args.model,
+                timeout_s=args.timeout,
+                max_retries=args.max_retries,
+                run_cmd=args.run,
+                patch_protocol=not args.legacy_mode,
+                max_files_changed=args.max_files_changed,
+                context_files=args.context_files,
+                approve=args.approve,
+                max_diff_lines=args.max_diff_lines,
+                max_file_bytes=args.max_file_bytes,
+                max_total_bytes=args.max_total_bytes,
+                post_apply_check=post_apply_check,
+                fmt=args.format,
+                lint=args.lint,
+                lint_fix=args.fix,
+                ruff_first=args.ruff_first,
+                report=report,
+            )
+    finally:
+        if report is not None and report_path is not None:
+            import datetime as _dt
+
+            report.finished_at = _dt.datetime.now(tz=_dt.timezone.utc).isoformat()
+            report.ok = bool(locals().get("ok", False))
+            report.write_json(report_path)
+            log(f"Wrote report: {os.path.abspath(report_path)}", "DEBUG")

     raise SystemExit(0 if ok else 2)

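
The FixReport and ReportAttempt objects wired through main() above come from the new auto_code_fixer/reporting.py, which is not included in this excerpt. A minimal sketch consistent with the attribute and constructor usage visible in cli.py (field names are taken from the diff; defaults, types, and the JSON layout are assumptions), with the unified_diff helper sketched earlier:

# Hypothetical sketch of auto_code_fixer/reporting.py, reconstructed from its call sites.
import dataclasses
import json
from dataclasses import dataclass, field


@dataclass
class ReportAttempt:
    index: int
    run_cmd: str
    return_code: int | None = None
    stdout: str = ""
    stderr: str = ""
    changed_files: list[str] = field(default_factory=list)
    ruff_first_applied: bool = False
    selected_file: str | None = None


@dataclass
class FixReport:
    version: str
    mode: str
    project_root: str
    target: str
    run_cmd: str | None = None
    sandbox_root: str | None = None
    files_touched: list[str] = field(default_factory=list)
    diffs: dict[str, str] = field(default_factory=dict)
    attempts: list[ReportAttempt] = field(default_factory=list)
    finished_at: str | None = None
    ok: bool = False

    def write_json(self, path: str) -> None:
        """Serialize the report, including per-attempt details, to a JSON file."""
        with open(path, "w", encoding="utf-8") as fh:
            json.dump(dataclasses.asdict(self), fh, indent=2)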