auto-code-fixer 0.3.7-py3-none-any.whl → 0.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- auto_code_fixer/__init__.py +1 -1
- auto_code_fixer/cli.py +611 -26
- auto_code_fixer/reporting.py +61 -0
- auto_code_fixer/sandbox.py +35 -0
- auto_code_fixer/utils.py +42 -0
- {auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/METADATA +26 -1
- {auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/RECORD +11 -10
- {auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/WHEEL +0 -0
- {auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/entry_points.txt +0 -0
- {auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/licenses/LICENSE +0 -0
- {auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/top_level.txt +0 -0
auto_code_fixer/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.3.7"
+__version__ = "0.4.0"
auto_code_fixer/cli.py
CHANGED
@@ -9,11 +9,14 @@ from auto_code_fixer.runner import run_code
 from auto_code_fixer.fixer import fix_code_with_gpt
 from auto_code_fixer.installer import check_and_install_missing_lib
 from auto_code_fixer.utils import (
+    changed_py_files,
     discover_all_files,
     is_in_project,
     log,
     set_verbose,
+    snapshot_py_hashes,
 )
+from auto_code_fixer.reporting import FixReport, ReportAttempt, unified_diff
 from auto_code_fixer import __version__

 DEFAULT_MAX_RETRIES = 8  # can be overridden via CLI
@@ -42,6 +45,8 @@ def fix_file(
     fmt: str | None,
     lint: str | None,
     lint_fix: bool,
+    ruff_first: bool,
+    report: FixReport | None,
 ) -> bool:
     log(f"Processing entry file: {file_path}")

@@ -71,12 +76,54 @@ def fix_file(

     venv_python = create_venv(sandbox_root)

+    def _run_ruff_first_pass() -> bool:
+        """Best-effort: run ruff auto-fix + import sorting + formatting before the LLM.
+
+        Returns True if it likely made changes.
+        """
+
+        from auto_code_fixer.command_runner import run_command
+
+        before = snapshot_py_hashes(sandbox_root)
+
+        # 1) Fix lint issues (including isort rules). --select I makes import sorting run
+        # even without an explicit project config.
+        rc1, _out1, err1 = run_command(
+            "python -m ruff check . --fix --select I",
+            timeout_s=max(timeout_s, 60),
+            python_exe=venv_python,
+            cwd=sandbox_root,
+            extra_env={"PYTHONPATH": sandbox_root},
+        )
+        if rc1 != 0:
+            if "No module named" in (err1 or "") and "ruff" in (err1 or ""):
+                log("ruff not installed in sandbox venv; skipping ruff-first", "DEBUG")
+                return False
+
+        # 2) Format (ruff formatter). Ignore failures.
+        run_command(
+            "python -m ruff format .",
+            timeout_s=max(timeout_s, 60),
+            python_exe=venv_python,
+            cwd=sandbox_root,
+            extra_env={"PYTHONPATH": sandbox_root},
+        )
+
+        after = snapshot_py_hashes(sandbox_root)
+        ch = changed_py_files(before, after)
+        if ch:
+            changed_sandbox_files.update({os.path.abspath(p) for p in ch})
+            return True
+        return False
+
     def _run_optional_formatters_and_linters() -> None:
         # Best-effort formatting/linting (only if tools are installed in the sandbox venv).
         from auto_code_fixer.command_runner import run_command

+        before = snapshot_py_hashes(sandbox_root)
+
         if fmt == "black":
-            rc,
+            rc, _out, err = run_command(
                 "python -m black .",
                 timeout_s=max(timeout_s, 60),
                 python_exe=venv_python,
@@ -96,7 +143,7 @@ def fix_file(
             cmd = "python -m ruff check ."
             if lint_fix:
                 cmd += " --fix"
-            rc,
+            rc, _out, err = run_command(
                 cmd,
                 timeout_s=max(timeout_s, 60),
                 python_exe=venv_python,
@@ -111,11 +158,25 @@ def fix_file(
             else:
                 log(f"ruff reported issues (rc={rc}): {err}", "DEBUG")

+        after = snapshot_py_hashes(sandbox_root)
+        ch = changed_py_files(before, after)
+        if ch:
+            changed_sandbox_files.update({os.path.abspath(p) for p in ch})
+
     changed_sandbox_files: set[str] = set()

     for attempt in range(max_retries):
         log(f"Run attempt #{attempt + 1}")

+        changed_before = set(changed_sandbox_files)
+
+        attempt_report: ReportAttempt | None = None
+        if report is not None:
+            attempt_report = ReportAttempt(
+                index=attempt + 1,
+                run_cmd=run_cmd or f"python {os.path.relpath(sandbox_entry, sandbox_root)}",
+            )
+
         # Ensure local modules resolve inside sandbox
         if run_cmd:
             from auto_code_fixer.command_runner import run_command
@@ -136,6 +197,11 @@ def fix_file(
                 extra_env={"PYTHONPATH": sandbox_root},
             )

+        if attempt_report is not None:
+            attempt_report.return_code = int(retcode)
+            attempt_report.stdout = stdout
+            attempt_report.stderr = stderr
+
         if verbose:
             if stdout:
                 log(f"STDOUT:\n{stdout}", "DEBUG")
@@ -145,6 +211,11 @@ def fix_file(
         if retcode == 0:
             log("Script executed successfully ✅")

+            if attempt_report is not None and report is not None:
+                delta = sorted({os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)})
+                attempt_report.changed_files = delta
+                report.attempts.append(attempt_report)
+
             # Apply sandbox changes back to project (only if we actually changed something)
             if attempt > 0 and is_in_project(file_path, project_root) and changed_sandbox_files:
                 rel_changes = [os.path.relpath(p, sandbox_root) for p in sorted(changed_sandbox_files)]
@@ -202,6 +273,24 @@ def fix_file(
                         log(f"Backup created: {bak}", "DEBUG")

                     os.makedirs(os.path.dirname(dst_real), exist_ok=True)
+
+                    if report is not None:
+                        try:
+                            old_text = ""
+                            if os.path.exists(dst_real):
+                                old_text = open(dst_real, encoding="utf-8").read()
+                            new_text = open(p_real, encoding="utf-8").read()
+                            rel_report = os.path.relpath(dst_real, project_root)
+                            report.files_touched = sorted(set(report.files_touched + [rel_report]))
+                            report.diffs[rel_report] = unified_diff(
+                                old=old_text,
+                                new=new_text,
+                                fromfile=f"a/{rel_report}",
+                                tofile=f"b/{rel_report}",
+                            )
+                        except Exception:
+                            pass
+
                     shutil.copy(p_real, dst_real)
                     log(f"File updated: {dst_real}")

@@ -246,8 +335,55 @@ def fix_file(
         if check_and_install_missing_lib(stderr, python_exe=venv_python, project_root=sandbox_root):
             log("Missing dependency installed (venv), retrying…")
             time.sleep(1)
+            if attempt_report is not None and report is not None:
+                delta = sorted({os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)})
+                attempt_report.changed_files = delta
+                report.attempts.append(attempt_report)
             continue

+        # Optional: ruff-first pass before invoking the LLM.
+        if ruff_first:
+            changed = _run_ruff_first_pass()
+            if attempt_report is not None:
+                attempt_report.ruff_first_applied = bool(changed)
+
+            if changed:
+                # Re-run command after ruff fixes.
+                if run_cmd:
+                    from auto_code_fixer.command_runner import run_command
+
+                    rc_r, out_r, err_r = run_command(
+                        run_cmd,
+                        timeout_s=timeout_s,
+                        python_exe=venv_python,
+                        cwd=sandbox_root,
+                        extra_env={"PYTHONPATH": sandbox_root},
+                    )
+                else:
+                    rc_r, out_r, err_r = run_code(
+                        sandbox_entry,
+                        timeout_s=timeout_s,
+                        python_exe=venv_python,
+                        cwd=sandbox_root,
+                        extra_env={"PYTHONPATH": sandbox_root},
+                    )
+
+                if rc_r == 0:
+                    log("Ruff-first fixed the issue; continuing", "DEBUG")
+                    if attempt_report is not None and report is not None:
+                        attempt_report.return_code = int(rc_r)
+                        attempt_report.stdout = out_r
+                        attempt_report.stderr = err_r
+                        delta = sorted({os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)})
+                        attempt_report.changed_files = delta
+                        report.attempts.append(attempt_report)
+                    continue
+                else:
+                    # Use the updated stderr/stdout for the LLM selection.
+                    stdout = out_r
+                    stderr = err_r
+                    retcode = rc_r
+
         # Pick the most relevant local file from the traceback (entry or imported file)
         from auto_code_fixer.traceback_utils import pick_relevant_file

@@ -282,6 +418,12 @@ def fix_file(
         except Exception:
             pass

+        if attempt_report is not None:
+            try:
+                attempt_report.selected_file = os.path.relpath(target_file, sandbox_root)
+            except Exception:
+                attempt_report.selected_file = target_file
+
         log(f"Sending {os.path.relpath(target_file, sandbox_root)} + error to GPT 🧠", "DEBUG")

         applied_any = False
@@ -459,11 +601,379 @@ def fix_file(
             log("Code updated by GPT ✏️")
             time.sleep(1)

+        if attempt_report is not None and report is not None:
+            delta = sorted({os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)})
+            attempt_report.changed_files = delta
+            report.attempts.append(attempt_report)
+
     log("Failed to auto-fix file after max retries ❌", "ERROR")
     cleanup_sandbox()
     return False


+def fixpack(
+    target_path: str,
+    project_root: str,
+    api_key: str | None,
+    ask: bool,
+    verbose: bool,
+    *,
+    dry_run: bool,
+    model: str | None,
+    timeout_s: int,
+    max_retries: int,
+    run_cmd: str | None,
+    patch_protocol: bool,
+    max_files_changed: int,
+    context_files: int,
+    approve: bool,
+    max_diff_lines: int,
+    max_file_bytes: int,
+    max_total_bytes: int,
+    post_apply_check: bool,
+    ruff_first: bool,
+    report: FixReport | None,
+) -> bool:
+    """Run a command (pytest by default) in a full-project sandbox and iteratively fix failures."""
+
+    from auto_code_fixer.sandbox import make_sandbox_project
+    from auto_code_fixer.venv_manager import create_venv
+
+    project_root = os.path.abspath(project_root)
+    target_path = os.path.abspath(target_path)
+
+    sandbox_root = make_sandbox_project(project_root=project_root)
+    if report is not None:
+        report.sandbox_root = sandbox_root
+
+    import atexit
+
+    def cleanup_sandbox() -> None:
+        try:
+            shutil.rmtree(sandbox_root)
+        except FileNotFoundError:
+            return
+        except Exception as e:
+            log(f"WARN: failed to delete sandbox dir {sandbox_root}: {e}")
+
+    atexit.register(cleanup_sandbox)
+
+    venv_python = create_venv(sandbox_root)
+    changed_sandbox_files: set[str] = set()
+
+    from auto_code_fixer.command_runner import run_command
+
+    effective_cmd = run_cmd or "pytest -q"
+
+    for attempt in range(max_retries):
+        log(f"Fixpack attempt #{attempt + 1}: running `{effective_cmd}`")
+
+        changed_before = set(changed_sandbox_files)
+
+        attempt_report: ReportAttempt | None = None
+        if report is not None:
+            attempt_report = ReportAttempt(index=attempt + 1, run_cmd=effective_cmd)
+
+        rc, out, err = run_command(
+            effective_cmd,
+            timeout_s=timeout_s,
+            python_exe=venv_python,
+            cwd=sandbox_root,
+            extra_env={"PYTHONPATH": sandbox_root},
+        )
+
+        combined = (out or "") + "\n" + (err or "")
+
+        if attempt_report is not None:
+            attempt_report.return_code = int(rc)
+            attempt_report.stdout = out
+            attempt_report.stderr = err
+
+        if verbose:
+            if out:
+                log(f"STDOUT:\n{out}", "DEBUG")
+            if err:
+                log(f"STDERR:\n{err}", "DEBUG")
+
+        if rc == 0:
+            log("Fixpack command succeeded ✅")
+
+            if attempt_report is not None and report is not None:
+                attempt_report.changed_files = sorted(
+                    {os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)}
+                )
+                report.attempts.append(attempt_report)
+
+            # Apply all changed sandbox files back to the real project.
+            if changed_sandbox_files:
+                rel_changes = [os.path.relpath(p, sandbox_root) for p in sorted(changed_sandbox_files)]
+
+                if ask:
+                    confirm = input(
+                        "Overwrite original files with fixed versions?\n"
+                        + "\n".join(f"- {c}" for c in rel_changes)
+                        + "\n(y/n): "
+                    ).strip().lower()
+                    if confirm != "y":
+                        log("User declined overwrite", "WARN")
+                        cleanup_sandbox()
+                        return False
+
+                if dry_run:
+                    log("DRY RUN: would apply fixes:\n" + "\n".join(rel_changes), "WARN")
+                else:
+                    # Compute diffs for report before copying.
+                    for p in sorted(changed_sandbox_files):
+                        rel = os.path.relpath(p, sandbox_root)
+                        dst = os.path.join(project_root, rel)
+                        if report is not None:
+                            try:
+                                old_text = ""
+                                if os.path.exists(dst):
+                                    old_text = open(dst, encoding="utf-8").read()
+                                new_text = open(p, encoding="utf-8").read()
+                                report.files_touched = sorted(set(report.files_touched + [rel]))
+                                report.diffs[rel] = unified_diff(
+                                    old=old_text,
+                                    new=new_text,
+                                    fromfile=f"a/{rel}",
+                                    tofile=f"b/{rel}",
+                                )
+                            except Exception:
+                                pass
+
+                        os.makedirs(os.path.dirname(dst), exist_ok=True)
+                        shutil.copy(p, dst)
+                        log(f"File updated: {dst}")
+
+            if post_apply_check:
+                rc2, out2, err2 = run_command(
+                    effective_cmd,
+                    timeout_s=timeout_s,
+                    python_exe=venv_python,
+                    cwd=project_root,
+                    extra_env={"PYTHONPATH": project_root},
+                )
+                if verbose:
+                    if out2:
+                        log(f"POST-APPLY STDOUT:\n{out2}", "DEBUG")
+                    if err2:
+                        log(f"POST-APPLY STDERR:\n{err2}", "DEBUG")
+                if rc2 != 0:
+                    log("Post-apply command failed", "ERROR")
+                    cleanup_sandbox()
+                    return False
+
+            cleanup_sandbox()
+            return True
+
+        log("Fixpack command failed ❌", "ERROR")
+        print(combined)
+
+        if check_and_install_missing_lib(combined, python_exe=venv_python, project_root=sandbox_root):
+            log("Missing dependency installed (venv), retrying…")
+            time.sleep(1)
+            if attempt_report is not None and report is not None:
+                attempt_report.changed_files = sorted(
+                    {os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)}
+                )
+                report.attempts.append(attempt_report)
+            continue
+
+        if ruff_first:
+            before = snapshot_py_hashes(sandbox_root)
+            rc_r1, _o1, _e1 = run_command(
+                "python -m ruff check . --fix --select I",
+                timeout_s=max(timeout_s, 60),
+                python_exe=venv_python,
+                cwd=sandbox_root,
+                extra_env={"PYTHONPATH": sandbox_root},
+            )
+            run_command(
+                "python -m ruff format .",
+                timeout_s=max(timeout_s, 60),
+                python_exe=venv_python,
+                cwd=sandbox_root,
+                extra_env={"PYTHONPATH": sandbox_root},
+            )
+            after = snapshot_py_hashes(sandbox_root)
+            ch = changed_py_files(before, after)
+            if ch:
+                changed_sandbox_files.update(ch)
+                if attempt_report is not None:
+                    attempt_report.ruff_first_applied = True
+
+            # Re-run after ruff
+            rc3, out3, err3 = run_command(
+                effective_cmd,
+                timeout_s=timeout_s,
+                python_exe=venv_python,
+                cwd=sandbox_root,
+                extra_env={"PYTHONPATH": sandbox_root},
+            )
+            if rc3 == 0:
+                if attempt_report is not None:
+                    attempt_report.return_code = int(rc3)
+                    attempt_report.stdout = out3
+                    attempt_report.stderr = err3
+                if attempt_report is not None and report is not None:
+                    attempt_report.changed_files = sorted(
+                        {os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)}
+                    )
+                    report.attempts.append(attempt_report)
+                continue
+            combined = (out3 or "") + "\n" + (err3 or "")
+
+        from auto_code_fixer.traceback_utils import pick_relevant_file
+
+        target_file = pick_relevant_file(combined, sandbox_root=sandbox_root)
+        if not target_file:
+            log("Could not determine failing file from output; stopping.", "ERROR")
+            cleanup_sandbox()
+            return False
+
+        # Ensure inside sandbox
+        sr = os.path.realpath(os.path.abspath(sandbox_root))
+        tf = os.path.realpath(os.path.abspath(target_file))
+        if not (tf.startswith(sr + os.sep) or tf == sr):
+            log("Suspicious target file path outside sandbox; stopping.", "ERROR")
+            cleanup_sandbox()
+            return False
+
+        if attempt_report is not None:
+            attempt_report.selected_file = os.path.relpath(tf, sandbox_root)
+
+        applied_any = False
+
+        if patch_protocol:
+            try:
+                from auto_code_fixer.fixer import fix_code_with_gpt_patch_protocol
+                from auto_code_fixer.patch_protocol import (
+                    parse_patch_protocol_response,
+                    validate_and_resolve_patch_files,
+                )
+                from auto_code_fixer.patcher import safe_read, atomic_write_verified_sha256
+                from auto_code_fixer.utils import find_imports
+                from auto_code_fixer.approval import (
+                    PlannedChange,
+                    UserAbort,
+                    guard_planned_changes,
+                    prompt_approve_file_by_file,
+                )
+
+                hint_paths = [os.path.relpath(tf, sandbox_root)]
+
+                ctx_pairs: list[tuple[str, str]] = []
+                if context_files and context_files > 0:
+                    rels: list[str] = []
+                    for abs_p in find_imports(tf, sandbox_root):
+                        try:
+                            rels.append(os.path.relpath(abs_p, sandbox_root))
+                        except Exception:
+                            continue
+                    rels = [r for r in rels if r not in hint_paths]
+                    for rel in rels[:context_files]:
+                        abs_p = os.path.join(sandbox_root, rel)
+                        if os.path.exists(abs_p):
+                            try:
+                                ctx_pairs.append((rel, safe_read(abs_p)))
+                            except Exception:
+                                pass
+
+                raw = fix_code_with_gpt_patch_protocol(
+                    sandbox_root=sandbox_root,
+                    error_log=combined,
+                    api_key=api_key,
+                    model=model,
+                    hint_paths=hint_paths,
+                    context_files=ctx_pairs,
+                )
+
+                patch_files = parse_patch_protocol_response(raw)
+                if len(patch_files) > max_files_changed:
+                    raise ValueError(
+                        f"Patch wants to change {len(patch_files)} files, exceeding --max-files-changed={max_files_changed}"
+                    )
+
+                resolved = validate_and_resolve_patch_files(patch_files, sandbox_root=sandbox_root)
+                sha_by_rel = {pf.path: pf.sha256 for pf in patch_files}
+
+                planned: list[PlannedChange] = []
+                for abs_path, new_content in resolved:
+                    old = ""
+                    if os.path.exists(abs_path):
+                        old = safe_read(abs_path)
+                    if new_content.strip() == (old or "").strip():
+                        continue
+                    rel = os.path.relpath(abs_path, sandbox_root)
+                    planned.append(
+                        PlannedChange(
+                            abs_path=abs_path,
+                            rel_path=rel,
+                            old_content=old,
+                            new_content=new_content,
+                        )
+                    )
+
+                if planned:
+                    guard_planned_changes(
+                        planned,
+                        max_file_bytes=max_file_bytes,
+                        max_total_bytes=max_total_bytes,
+                        max_diff_lines=max_diff_lines,
+                    )
+
+                if approve:
+                    try:
+                        planned = prompt_approve_file_by_file(planned)
+                    except UserAbort:
+                        planned = []
+
+                for chg in planned:
+                    expected_sha = sha_by_rel.get(chg.rel_path)
+                    if not expected_sha:
+                        raise ValueError(f"Missing sha256 for {chg.rel_path} in patch protocol payload")
+                    atomic_write_verified_sha256(chg.abs_path, chg.new_content, expected_sha)
+                    changed_sandbox_files.add(os.path.abspath(chg.abs_path))
+                    applied_any = True
+
+            except Exception as e:
+                log(f"Patch protocol failed ({e}); falling back to full-text mode", "WARN")
+
+        if not applied_any:
+            fixed_code = fix_code_with_gpt(
+                original_code=open(tf, encoding="utf-8").read(),
+                error_log=combined,
+                api_key=api_key,
+                model=model,
+            )
+
+            from auto_code_fixer.patcher import safe_write
+
+            if fixed_code.strip() == open(tf, encoding="utf-8").read().strip():
+                log("GPT returned no changes. Stopping.", "WARN")
+                cleanup_sandbox()
+                return False
+
+            safe_write(tf, fixed_code)
+            changed_sandbox_files.add(os.path.abspath(tf))
+            applied_any = True
+
+        if applied_any:
+            log("Code updated by GPT ✏️")
+            time.sleep(1)
+
+        if attempt_report is not None and report is not None:
+            attempt_report.changed_files = sorted(
+                {os.path.relpath(p, sandbox_root) for p in (set(changed_sandbox_files) - changed_before)}
+            )
+            report.attempts.append(attempt_report)
+
+    log("Failed to fixpack after max retries ❌", "ERROR")
+    cleanup_sandbox()
+    return False
+
+
 def main():
     parser = argparse.ArgumentParser(
         description="Auto-fix Python code using OpenAI (advanced sandbox + retry loop)"
@@ -478,7 +988,7 @@ def main():
     parser.add_argument(
         "entry_file",
         nargs="?",
-        help="Path to the main Python file",
+        help="Path to the main Python file (or folder/package path if --fixpack)",
     )

     parser.add_argument("--project-root", default=".")
@@ -495,6 +1005,35 @@ def main():
             "If set, it runs inside the sandbox venv."
         ),
     )
+
+    parser.add_argument(
+        "--fixpack",
+        action="store_true",
+        help=(
+            "Fixpack mode: treat entry_file as a folder/package path, create a full-project sandbox, "
+            "run a command (default: pytest -q), and iteratively fix failing files until green or budget."
+        ),
+    )
+
+    parser.add_argument(
+        "--ruff-first",
+        action="store_true",
+        help=(
+            "Before calling the LLM, try best-effort Ruff auto-fixes (including import sorting) and Ruff formatting."
+        ),
+    )
+
+    parser.add_argument(
+        "--explain",
+        action="store_true",
+        help="Write a JSON report (report.json by default) with attempts, diffs, and final status.",
+    )
+
+    parser.add_argument(
+        "--report",
+        default=None,
+        help="Path to write the JSON report (implies --explain). Default: ./report.json",
+    )
     parser.add_argument(
         "--ai-plan",
         action="store_true",
@@ -638,29 +1177,75 @@ def main():
     else:
         post_apply_check = bool(args.run)

[23 removed lines (old 641-663) are truncated in this diff view]
+    report_path = args.report or ("report.json" if args.explain else None)
+    report: FixReport | None = None
+    if report_path is not None:
+        report = FixReport(
+            version=__version__,
+            mode="fixpack" if args.fixpack else "file",
+            project_root=os.path.abspath(args.project_root),
+            target=os.path.abspath(args.entry_file),
+            run_cmd=args.run or ("pytest -q" if args.fixpack else None),
+        )
+
+    try:
+        if args.fixpack:
+            ok = fixpack(
+                args.entry_file,
+                args.project_root,
+                args.api_key,
+                ask,
+                verbose,
+                dry_run=args.dry_run,
+                model=args.model,
+                timeout_s=args.timeout,
+                max_retries=args.max_retries,
+                run_cmd=args.run,
+                patch_protocol=not args.legacy_mode,
+                max_files_changed=args.max_files_changed,
+                context_files=args.context_files,
+                approve=args.approve,
+                max_diff_lines=args.max_diff_lines,
+                max_file_bytes=args.max_file_bytes,
+                max_total_bytes=args.max_total_bytes,
+                post_apply_check=post_apply_check,
+                ruff_first=args.ruff_first,
+                report=report,
+            )
+        else:
+            ok = fix_file(
+                args.entry_file,
+                args.project_root,
+                args.api_key,
+                ask,
+                verbose,
+                dry_run=args.dry_run,
+                model=args.model,
+                timeout_s=args.timeout,
+                max_retries=args.max_retries,
+                run_cmd=args.run,
+                patch_protocol=not args.legacy_mode,
+                max_files_changed=args.max_files_changed,
+                context_files=args.context_files,
+                approve=args.approve,
+                max_diff_lines=args.max_diff_lines,
+                max_file_bytes=args.max_file_bytes,
+                max_total_bytes=args.max_total_bytes,
+                post_apply_check=post_apply_check,
+                fmt=args.format,
+                lint=args.lint,
+                lint_fix=args.fix,
+                ruff_first=args.ruff_first,
+                report=report,
+            )
+    finally:
+        if report is not None and report_path is not None:
+            import datetime as _dt
+
+            report.finished_at = _dt.datetime.now(tz=_dt.timezone.utc).isoformat()
+            report.ok = bool(locals().get("ok", False))
+            report.write_json(report_path)
+            log(f"Wrote report: {os.path.abspath(report_path)}", "DEBUG")

     raise SystemExit(0 if ok else 2)

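The heart of the new `--ruff-first` path above is a before/after hash snapshot around Ruff's fixer and formatter, so the LLM is only consulted when Ruff alone does not make the run green. A minimal standalone sketch of that pattern, assuming `ruff` is installed (the helper names here are illustrative; only the `ruff` invocations mirror the diff):

```python
import hashlib
import subprocess
import sys
from pathlib import Path


def _py_hashes(root: Path) -> dict[Path, str]:
    # Hash every .py file so we can tell whether ruff touched anything.
    return {p: hashlib.sha256(p.read_bytes()).hexdigest() for p in root.rglob("*.py")}


def ruff_first_pass(root: Path) -> bool:
    """Best-effort ruff fix + format; returns True if any .py file changed."""
    before = _py_hashes(root)
    # --select I adds import sorting even without a project config (as in the diff above).
    subprocess.run([sys.executable, "-m", "ruff", "check", ".", "--fix", "--select", "I"],
                   cwd=root, capture_output=True, text=True)
    subprocess.run([sys.executable, "-m", "ruff", "format", "."],
                   cwd=root, capture_output=True, text=True)
    after = _py_hashes(root)
    return any(before.get(p) != after.get(p) for p in set(before) | set(after))
```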
auto_code_fixer/reporting.py
ADDED
@@ -0,0 +1,61 @@
+import dataclasses
+import datetime as _dt
+import difflib
+import json
+import os
+from typing import Any
+
+
+def utc_now_iso() -> str:
+    return _dt.datetime.now(tz=_dt.timezone.utc).isoformat()
+
+
+def unified_diff(*, old: str, new: str, fromfile: str, tofile: str) -> str:
+    return "".join(
+        difflib.unified_diff(
+            (old or "").splitlines(keepends=True),
+            (new or "").splitlines(keepends=True),
+            fromfile=fromfile,
+            tofile=tofile,
+        )
+    )
+
+
+@dataclasses.dataclass
+class ReportAttempt:
+    index: int
+    run_cmd: str
+    return_code: int = 0
+    stdout: str | None = None
+    stderr: str | None = None
+    selected_file: str | None = None
+    ruff_first_applied: bool = False
+    changed_files: list[str] = dataclasses.field(default_factory=list)
+
+
+@dataclasses.dataclass
+class FixReport:
+    tool: str = "auto-code-fixer"
+    version: str | None = None
+    mode: str = "file"  # file|fixpack
+    project_root: str | None = None
+    sandbox_root: str | None = None
+    target: str | None = None
+    run_cmd: str | None = None
+    started_at: str = dataclasses.field(default_factory=utc_now_iso)
+    finished_at: str | None = None
+    ok: bool | None = None
+    attempts: list[ReportAttempt] = dataclasses.field(default_factory=list)
+    files_touched: list[str] = dataclasses.field(default_factory=list)
+    diffs: dict[str, str] = dataclasses.field(default_factory=dict)  # relpath -> unified diff
+
+    def to_dict(self) -> dict[str, Any]:
+        d = dataclasses.asdict(self)
+        # dataclasses.asdict converts nested dataclasses, OK
+        return d
+
+    def write_json(self, path: str) -> None:
+        os.makedirs(os.path.dirname(os.path.abspath(path)) or ".", exist_ok=True)
+        with open(path, "w", encoding="utf-8") as f:
+            json.dump(self.to_dict(), f, indent=2, sort_keys=False)
+            f.write("\n")
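Since reporting.py is a new module, a short usage sketch may help; the values below are made-up examples, only the class and function signatures come from the file above:

```python
# Sketch: building and serializing a report with the new dataclasses.
from auto_code_fixer.reporting import FixReport, ReportAttempt, unified_diff

report = FixReport(version="0.4.0", mode="fixpack", run_cmd="pytest -q")
attempt = ReportAttempt(index=1, run_cmd="pytest -q", return_code=1, stderr="1 failed")
attempt.changed_files = ["pkg/calc.py"]  # illustrative path
report.attempts.append(attempt)
report.files_touched = ["pkg/calc.py"]
report.diffs["pkg/calc.py"] = unified_diff(
    old="def add(a, b):\n    return a - b\n",
    new="def add(a, b):\n    return a + b\n",
    fromfile="a/pkg/calc.py",
    tofile="b/pkg/calc.py",
)
report.ok = True
report.write_json("report.json")  # pretty-printed JSON: attempts, files_touched, diffs
```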
auto_code_fixer/sandbox.py
CHANGED
@@ -35,6 +35,41 @@ def make_sandbox(*, entry_file: str, project_root: str) -> tuple[str, str]:
     return sandbox_root, sandbox_entry


+def make_sandbox_project(*, project_root: str) -> str:
+    """Create a sandbox directory with a full copy of project_root.
+
+    This is used for "Fixpack" runs where we execute a command (e.g., pytest) across
+    a package/folder and iteratively fix failures.
+    """
+
+    project_root = os.path.abspath(project_root)
+    sandbox_root = tempfile.mkdtemp(prefix="codefix_sandbox_")
+
+    def _ignore(_dir: str, names: list[str]):
+        skip = {".git",
+                ".venv",
+                "__pycache__",
+                "build",
+                "dist",
+                ".mypy_cache",
+                ".ruff_cache",
+                ".pytest_cache",
+                }
+        return {n for n in names if n in skip}
+
+    # Copy everything into sandbox_root.
+    # Note: copytree requires dest not to exist; copy contents manually.
+    for item in os.listdir(project_root):
+        src = os.path.join(project_root, item)
+        dst = os.path.join(sandbox_root, item)
+        if os.path.isdir(src):
+            shutil.copytree(src, dst, ignore=_ignore)
+        else:
+            shutil.copy2(src, dst)
+
+    return sandbox_root
+
+
 def apply_sandbox_back(*, sandbox_root: str, project_root: str, changed_paths: list[str]) -> None:
     """Copy changed sandbox files back into project_root."""

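A quick sketch of how `make_sandbox_project` is meant to be consumed; the project path and the cleanup policy here are illustrative, not part of the package:

```python
import shutil
from auto_code_fixer.sandbox import make_sandbox_project

# Copy the whole project (minus .git, .venv, caches, build artifacts) into a temp dir.
sandbox_root = make_sandbox_project(project_root=".")
try:
    print(f"Fixpack working copy at {sandbox_root}")
    # ... run pytest / apply fixes against the copy, never the real tree ...
finally:
    shutil.rmtree(sandbox_root, ignore_errors=True)
```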
auto_code_fixer/utils.py
CHANGED
@@ -104,3 +104,45 @@ def discover_all_files(entry_file: str) -> list[str]:

 def is_in_project(file_path: str, project_root: str) -> bool:
     return os.path.abspath(file_path).startswith(os.path.abspath(project_root))
+
+
+def sha256_text(text: str) -> str:
+    import hashlib
+
+    return hashlib.sha256((text or "").encode("utf-8")).hexdigest()
+
+
+def snapshot_py_hashes(root: str) -> dict[str, str]:
+    """Return mapping of absolute .py path -> sha256 of contents."""
+
+    import hashlib
+
+    out: dict[str, str] = {}
+    for dirpath, dirnames, filenames in os.walk(root):
+        # Keep the walk light.
+        dirnames[:] = [
+            d
+            for d in dirnames
+            if d not in {".git", ".venv", "__pycache__", "build", "dist", ".mypy_cache", ".ruff_cache"}
+        ]
+        for fn in filenames:
+            if not fn.endswith(".py"):
+                continue
+            ap = os.path.join(dirpath, fn)
+            try:
+                data = Path(ap).read_bytes()
+            except Exception:
+                continue
+            out[os.path.abspath(ap)] = hashlib.sha256(data).hexdigest()
+    return out
+
+
+def changed_py_files(before: dict[str, str], after: dict[str, str]) -> set[str]:
+    """Return absolute paths for .py files whose hash changed or were added/removed."""
+
+    changed: set[str] = set()
+    all_paths = set(before) | set(after)
+    for p in all_paths:
+        if before.get(p) != after.get(p):
+            changed.add(os.path.abspath(p))
+    return changed
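The two new helpers are intended as a before/after pair around any tool that may rewrite files; a minimal sketch (the directory name is illustrative):

```python
from auto_code_fixer.utils import changed_py_files, snapshot_py_hashes

before = snapshot_py_hashes("sandbox_dir")
# ... run ruff, apply an LLM patch, etc. ...
after = snapshot_py_hashes("sandbox_dir")
for path in sorted(changed_py_files(before, after)):
    print(f"changed: {path}")
```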
{auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: auto-code-fixer
-Version: 0.3.7
+Version: 0.4.0
 Summary: Automatically fix Python code using ChatGPT
 Author-email: Arif Shah <ashah7775@gmail.com>
 License: MIT
@@ -108,6 +108,15 @@ auto-code-fixer path/to/main.py --project-root . --dry-run

 ## Advanced options

+### Fixpack mode (folder/package runner)
+Fixpack runs a command (default: `pytest -q`) against a **full-project sandbox**, then iteratively fixes the failing file(s) until green or you hit `--max-retries`.
+
+```bash
+auto-code-fixer path/to/package --project-root . --fixpack --no-ask
+# equivalent explicit command:
+auto-code-fixer path/to/package --project-root . --fixpack --run "pytest -q" --no-ask
+```
+
 ### Run a custom command (pytest, etc.)
 Instead of `python main.py`, run tests:

@@ -156,6 +165,13 @@ To disable this and use legacy full-text mode only:
 auto-code-fixer main.py --legacy-mode
 ```

+### Ruff-first mode (best-effort)
+Before calling the LLM, you can try Ruff auto-fixes (including import sorting) and Ruff formatting:
+
+```bash
+auto-code-fixer path/to/package --project-root . --fixpack --ruff-first
+```
+
 ### Optional formatting / linting (best-effort)
 ```bash
 auto-code-fixer main.py --format black
@@ -163,6 +179,15 @@ auto-code-fixer main.py --lint ruff --fix
 ```
 These run inside the sandbox venv and are skipped if the tools are not installed.

+### JSON report (explain)
+Write a machine-readable report with attempts, files touched, and unified diffs:
+
+```bash
+auto-code-fixer main.py --explain
+# or
+auto-code-fixer main.py --report /tmp/report.json
+```
+
 ---

 ## Environment variables
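The report written by `--explain` / `--report` is plain JSON mirroring the FixReport dataclass above, so it can be post-processed; a sketch, assuming the default `report.json` path:

```python
# Sketch: summarize a report.json written via --explain / --report.
import json

with open("report.json", encoding="utf-8") as f:
    report = json.load(f)

print(report["mode"], report["ok"], report["run_cmd"])
for attempt in report["attempts"]:
    print(f"attempt {attempt['index']}: rc={attempt['return_code']} changed={attempt['changed_files']}")
for rel in report["files_touched"]:
    print(f"--- {rel} ---")
    print(report["diffs"].get(rel, ""))
```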
{auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
-auto_code_fixer/__init__.py,sha256=
+auto_code_fixer/__init__.py,sha256=42STGor_9nKYXumfeV5tiyD_M8VdcddX7CEexmibPBk,22
 auto_code_fixer/approval.py,sha256=NPSLu54maAK2RAJF_t4fq7Bs_M8K026DhDwqBFJtiwQ,2832
-auto_code_fixer/cli.py,sha256=
+auto_code_fixer/cli.py,sha256=Dn9A2geL58lvVWLt6K0whZfZVcBN3zpWEM5OS38Q4Ik,47713
 auto_code_fixer/command_runner.py,sha256=6P8hGRavN5C39x-e03p02Vc805NnZH9U7e48ngb5jJI,1104
 auto_code_fixer/fixer.py,sha256=zcgw56pRTuOLvna09lTXatD0VWwjjzBVk0OyEKfgxDM,4691
 auto_code_fixer/installer.py,sha256=LC0jasSsPI7eHMeDxa622OoMCR1951HAXUZWp-kcmVY,1522
@@ -8,14 +8,15 @@ auto_code_fixer/models.py,sha256=JLBJutOoiOjjlT_RMPUPhWlmm1yc_nGcQqv5tY72Al0,317
 auto_code_fixer/patch_protocol.py,sha256=8l1E9o-3jkO4VAI7Ulrf-1MbAshNzjQXtUkmH-0hYio,3216
 auto_code_fixer/patcher.py,sha256=BcQTnjWazdpuEXyR2AlumFBzIk_yIrO3fGTaIqpHuiU,1811
 auto_code_fixer/plan.py,sha256=jrZdG-f1RDxVB0tBLlTwKbCSEiOYI_RMetdzfBcyE4s,1762
+auto_code_fixer/reporting.py,sha256=bulUsP0DIfLBmtVjZ1YdTV7RAk8Aiy81ASavcr4iano,1887
 auto_code_fixer/runner.py,sha256=BvQm3CrwkQEDOw0tpiamSTcdu3OjbOgA801xW2zWdP8,970
-auto_code_fixer/sandbox.py,sha256=
+auto_code_fixer/sandbox.py,sha256=s36i7mzZFjuAWRUEumxtYfVwasFa8TehobzH-rE_kKA,2721
 auto_code_fixer/traceback_utils.py,sha256=sbSuLO-2UBk5QPJZYJunTK9WGOpEY8mxR6WRKbtCIoM,935
-auto_code_fixer/utils.py,sha256=
+auto_code_fixer/utils.py,sha256=JLagGVnUj67zZdpkKSHhVcqwob30f62SyS7IbH20sHs,4369
 auto_code_fixer/venv_manager.py,sha256=2ww8reYgLbLohh-moAD5YKM09qv_mC5yYzJRwm3XiXc,1202
-auto_code_fixer-0.
-auto_code_fixer-0.
-auto_code_fixer-0.
-auto_code_fixer-0.
-auto_code_fixer-0.
-auto_code_fixer-0.
+auto_code_fixer-0.4.0.dist-info/licenses/LICENSE,sha256=hgchJNa26tjXuLztwSUDbYQxNLnAPnLk6kDXNIkC8xc,1066
+auto_code_fixer-0.4.0.dist-info/METADATA,sha256=qyjUMCYrF1_wRp_4LQSa6t1btgwTNslKTl9rBNhSdZo,5570
+auto_code_fixer-0.4.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+auto_code_fixer-0.4.0.dist-info/entry_points.txt,sha256=a-j2rkfwkrhXZ5Qbz_6_gwk6Bj7nijYR1DALjWp5Myk,61
+auto_code_fixer-0.4.0.dist-info/top_level.txt,sha256=qUk1qznb6Qxqmxy2A3z_5dpOZlmNKHwUiLuJwH-CrAk,16
+auto_code_fixer-0.4.0.dist-info/RECORD,,
{auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/WHEEL
File without changes
{auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/entry_points.txt
File without changes
{auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/licenses/LICENSE
File without changes
{auto_code_fixer-0.3.7.dist-info → auto_code_fixer-0.4.0.dist-info}/top_level.txt
File without changes