auto-code-fixer 0.3.4__py3-none-any.whl → 0.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- auto_code_fixer/__init__.py +1 -1
- auto_code_fixer/cli.py +258 -15
- auto_code_fixer/fixer.py +77 -0
- auto_code_fixer/patch_protocol.py +102 -0
- auto_code_fixer/patcher.py +39 -3
- {auto_code_fixer-0.3.4.dist-info → auto_code_fixer-0.3.6.dist-info}/METADATA +16 -1
- {auto_code_fixer-0.3.4.dist-info → auto_code_fixer-0.3.6.dist-info}/RECORD +11 -10
- {auto_code_fixer-0.3.4.dist-info → auto_code_fixer-0.3.6.dist-info}/WHEEL +0 -0
- {auto_code_fixer-0.3.4.dist-info → auto_code_fixer-0.3.6.dist-info}/entry_points.txt +0 -0
- {auto_code_fixer-0.3.4.dist-info → auto_code_fixer-0.3.6.dist-info}/licenses/LICENSE +0 -0
- {auto_code_fixer-0.3.4.dist-info → auto_code_fixer-0.3.6.dist-info}/top_level.txt +0 -0
auto_code_fixer/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.3.4"
+__version__ = "0.3.6"

auto_code_fixer/cli.py
CHANGED
@@ -19,7 +19,26 @@ from auto_code_fixer import __version__
 DEFAULT_MAX_RETRIES = 8  # can be overridden via CLI


-def fix_file(file_path, project_root, api_key, ask, verbose, *, dry_run: bool, model: str | None, timeout_s: int, max_retries: int, run_cmd: str | None) -> bool:
+def fix_file(
+    file_path,
+    project_root,
+    api_key,
+    ask,
+    verbose,
+    *,
+    dry_run: bool,
+    model: str | None,
+    timeout_s: int,
+    max_retries: int,
+    run_cmd: str | None,
+    patch_protocol: bool,
+    max_files_changed: int,
+    context_files: int,
+    approve: bool,
+    fmt: str | None,
+    lint: str | None,
+    lint_fix: bool,
+) -> bool:
     log(f"Processing entry file: {file_path}")

     project_root = os.path.abspath(project_root)

@@ -48,6 +67,46 @@ def fix_file(file_path, project_root, api_key, ask, verbose, *, dry_run: bool, m

     venv_python = create_venv(sandbox_root)

+    def _run_optional_formatters_and_linters() -> None:
+        # Best-effort formatting/linting (only if tools are installed in the sandbox venv).
+        from auto_code_fixer.command_runner import run_command
+
+        if fmt == "black":
+            rc, out, err = run_command(
+                "python -m black .",
+                timeout_s=max(timeout_s, 60),
+                python_exe=venv_python,
+                cwd=sandbox_root,
+                extra_env={"PYTHONPATH": sandbox_root},
+            )
+            if rc == 0:
+                log("Formatted with black", "DEBUG")
+            else:
+                # If black isn't installed, ignore.
+                if "No module named" in (err or "") and "black" in (err or ""):
+                    log("black not installed in sandbox venv; skipping format", "DEBUG")
+                else:
+                    log(f"black failed (rc={rc}): {err}", "DEBUG")
+
+        if lint == "ruff":
+            cmd = "python -m ruff check ."
+            if lint_fix:
+                cmd += " --fix"
+            rc, out, err = run_command(
+                cmd,
+                timeout_s=max(timeout_s, 60),
+                python_exe=venv_python,
+                cwd=sandbox_root,
+                extra_env={"PYTHONPATH": sandbox_root},
+            )
+            if rc == 0:
+                log("ruff check passed", "DEBUG")
+            else:
+                if "No module named" in (err or "") and "ruff" in (err or ""):
+                    log("ruff not installed in sandbox venv; skipping lint", "DEBUG")
+                else:
+                    log(f"ruff reported issues (rc={rc}): {err}", "DEBUG")
+
     changed_sandbox_files: set[str] = set()

     for attempt in range(max_retries):

@@ -187,24 +246,151 @@ def fix_file(file_path, project_root, api_key, ask, verbose, *, dry_run: bool, m

         log(f"Sending {os.path.relpath(target_file, sandbox_root)} + error to GPT 🧠", "DEBUG")

-        fixed_code = fix_code_with_gpt(
-            original_code=open(target_file, encoding="utf-8").read(),
-            error_log=stderr,
-            api_key=api_key,
-            model=model,
-        )
+        applied_any = False

-        if fixed_code.strip() == open(target_file, encoding="utf-8").read().strip():
-            log("GPT returned no changes. Stopping.", "WARN")
-            break
+        if patch_protocol:
+            try:
+                import difflib

-        from auto_code_fixer.patcher import safe_write
-        safe_write(target_file, fixed_code)
+                from auto_code_fixer.fixer import fix_code_with_gpt_patch_protocol
+                from auto_code_fixer.patch_protocol import (
+                    parse_patch_protocol_response,
+                    validate_and_resolve_patch_files,
+                )
+                from auto_code_fixer.patcher import safe_read, atomic_write_verified_sha256
+                from auto_code_fixer.utils import find_imports

-        changed_sandbox_files.add(os.path.abspath(target_file))
+                hint_paths = [os.path.relpath(target_file, sandbox_root)]

-        log("Code updated by GPT ✏️")
-        time.sleep(1)
+                # Add a few related local files as read-only context to reduce back-and-forth.
+                ctx_pairs: list[tuple[str, str]] = []
+                if context_files and context_files > 0:
+                    rels: list[str] = []
+                    for abs_p in find_imports(target_file, sandbox_root):
+                        try:
+                            rels.append(os.path.relpath(abs_p, sandbox_root))
+                        except Exception:
+                            continue
+                    # drop duplicates and the target itself
+                    rels = [r for r in rels if r not in hint_paths]
+                    for rel in rels[:context_files]:
+                        abs_p = os.path.join(sandbox_root, rel)
+                        if os.path.exists(abs_p):
+                            try:
+                                ctx_pairs.append((rel, safe_read(abs_p)))
+                            except Exception:
+                                pass
+
+                raw = fix_code_with_gpt_patch_protocol(
+                    sandbox_root=sandbox_root,
+                    error_log=stderr,
+                    api_key=api_key,
+                    model=model,
+                    hint_paths=hint_paths,
+                    context_files=ctx_pairs,
+                )
+
+                patch_files = parse_patch_protocol_response(raw)
+
+                if len(patch_files) > max_files_changed:
+                    raise ValueError(
+                        f"Patch wants to change {len(patch_files)} files, exceeding --max-files-changed={max_files_changed}"
+                    )
+
+                resolved = validate_and_resolve_patch_files(patch_files, sandbox_root=sandbox_root)
+
+                # Prepare diffs / approvals and apply atomically.
+                sha_by_rel = {pf.path: pf.sha256 for pf in patch_files}
+
+                planned: list[tuple[str, str, str, str]] = []  # (abs_path, rel, old, new)
+                for abs_path, new_content in resolved:
+                    old = ""
+                    if os.path.exists(abs_path):
+                        old = safe_read(abs_path)
+                    if new_content.strip() == (old or "").strip():
+                        continue
+                    rel = os.path.relpath(abs_path, sandbox_root)
+                    planned.append((abs_path, rel, old, new_content))
+
+                if planned:
+                    if len(planned) > max_files_changed:
+                        raise ValueError(
+                            f"Patch would change {len(planned)} files, exceeding --max-files-changed={max_files_changed}"
+                        )
+
+                    if approve:
+                        print("\nPROPOSED CHANGES (patch protocol):")
+                        for _, rel, old, new in planned:
+                            diff = "".join(
+                                difflib.unified_diff(
+                                    (old or "").splitlines(keepends=True),
+                                    (new or "").splitlines(keepends=True),
+                                    fromfile=rel + " (before)",
+                                    tofile=rel + " (after)",
+                                )
+                            )
+                            print(diff)
+
+                        confirm = input("Apply these changes in the sandbox? (y/n): ").strip().lower()
+                        if confirm != "y":
+                            log("User declined patch application", "WARN")
+                            planned = []
+
+                    for abs_path, rel, old, new_content in planned:
+                        expected_sha = sha_by_rel.get(rel)
+                        if not expected_sha:
+                            # Shouldn't happen; keep safe.
+                            raise ValueError(f"Missing sha256 for {rel} in patch protocol payload")
+
+                        atomic_write_verified_sha256(abs_path, new_content, expected_sha)
+                        changed_sandbox_files.add(os.path.abspath(abs_path))
+                        applied_any = True
+
+            except Exception as e:
+                log(f"Patch protocol failed ({e}); falling back to full-text mode", "WARN")
+
+        if not applied_any:
+            fixed_code = fix_code_with_gpt(
+                original_code=open(target_file, encoding="utf-8").read(),
+                error_log=stderr,
+                api_key=api_key,
+                model=model,
+            )
+
+            if fixed_code.strip() == open(target_file, encoding="utf-8").read().strip():
+                log("GPT returned no changes. Stopping.", "WARN")
+                break
+
+            if approve:
+                import difflib
+
+                old = open(target_file, encoding="utf-8").read()
+                diff = "".join(
+                    difflib.unified_diff(
+                        (old or "").splitlines(keepends=True),
+                        (fixed_code or "").splitlines(keepends=True),
+                        fromfile=os.path.relpath(target_file, sandbox_root) + " (before)",
+                        tofile=os.path.relpath(target_file, sandbox_root) + " (after)",
+                    )
+                )
+                print("\nPROPOSED CHANGES (legacy mode):")
+                print(diff)
+                confirm = input("Apply this change in the sandbox? (y/n): ").strip().lower()
+                if confirm != "y":
+                    log("User declined patch application", "WARN")
+                    cleanup_sandbox()
+                    return False
+
+            from auto_code_fixer.patcher import safe_write
+
+            safe_write(target_file, fixed_code)
+            changed_sandbox_files.add(os.path.abspath(target_file))
+            applied_any = True
+
+        if applied_any:
+            _run_optional_formatters_and_linters()
+            log("Code updated by GPT ✏️")
+            time.sleep(1)

     log("Failed to auto-fix file after max retries ❌", "ERROR")
     cleanup_sandbox()

@@ -248,6 +434,56 @@ def main():
         help="Optional: use AI to suggest which file to edit (AUTO_CODE_FIXER_AI_PLAN=1)",
     )

+    parser.add_argument(
+        "--legacy-mode",
+        action="store_true",
+        help=(
+            "Disable the default JSON patch protocol and use legacy full-text edit mode only. "
+            "(Not recommended; patch protocol is safer and supports multi-file fixes.)"
+        ),
+    )
+
+    parser.add_argument(
+        "--max-files-changed",
+        type=int,
+        default=20,
+        help="Safety guard: maximum number of files the model may change per attempt (default: 20)",
+    )
+
+    parser.add_argument(
+        "--context-files",
+        type=int,
+        default=3,
+        help=(
+            "Include up to N related local files (imports) as read-only context in the LLM prompt "
+            "when using patch protocol (default: 3)"
+        ),
+    )
+
+    parser.add_argument(
+        "--approve",
+        action="store_true",
+        help="Show diffs for proposed changes and ask for approval before applying them in the sandbox",
+    )
+
+    parser.add_argument(
+        "--format",
+        default=None,
+        choices=["black"],
+        help="Optional: run formatter in sandbox (best-effort). Currently supported: black",
+    )
+    parser.add_argument(
+        "--lint",
+        default=None,
+        choices=["ruff"],
+        help="Optional: run linter in sandbox (best-effort). Currently supported: ruff",
+    )
+    parser.add_argument(
+        "--fix",
+        action="store_true",
+        help="If used with --lint ruff, apply fixes (ruff --fix) (best-effort)",
+    )
+
     # ✅ Proper boolean flags
     ask_group = parser.add_mutually_exclusive_group()
     ask_group.add_argument(

@@ -302,6 +538,13 @@ def main():
         timeout_s=args.timeout,
         max_retries=args.max_retries,
         run_cmd=args.run,
+        patch_protocol=not args.legacy_mode,
+        max_files_changed=args.max_files_changed,
+        context_files=args.context_files,
+        approve=args.approve,
+        fmt=args.format,
+        lint=args.lint,
+        lint_fix=args.fix,
     )

     raise SystemExit(0 if ok else 2)

auto_code_fixer/fixer.py
CHANGED
@@ -77,3 +77,80 @@ def fix_code_with_gpt(
                 text += getattr(c, "text", "") or ""

     return _strip_code_fences(text)
+
+
+def fix_code_with_gpt_patch_protocol(
+    *,
+    sandbox_root: str,
+    error_log: str,
+    api_key: str | None = None,
+    model: str | None = None,
+    hint_paths: list[str] | None = None,
+    context_files: list[tuple[str, str]] | None = None,
+) -> str:
+    """Ask the model for structured edits (patch protocol).
+
+    Returns raw model text (expected JSON). Parsing/validation happens elsewhere.
+
+    context_files: optional list of (relative_path, content) to include as read-only context.
+    """
+
+    client = get_openai_client(api_key)
+    model = model or os.getenv("AUTO_CODE_FIXER_MODEL") or "gpt-4.1-mini"
+
+    schema = {
+        "files": [
+            {
+                "path": "relative/path/from/sandbox_root.py",
+                "new_content": "FULL new file contents",
+                "sha256": "sha256 hex of new_content encoded as utf-8",
+            }
+        ]
+    }
+
+    hints = ""
+    if hint_paths:
+        hints = "\n\nCANDIDATE FILES (relative paths; edit one or more if needed):\n" + "\n".join(
+            f"- {p}" for p in hint_paths
+        )
+
+    ctx = ""
+    if context_files:
+        # Keep it readable and avoid enormous prompts.
+        blocks: list[str] = []
+        for rel, content in context_files:
+            snippet = content
+            if len(snippet) > 4000:
+                snippet = snippet[:4000] + "\n... (truncated)"
+            blocks.append(f"--- FILE: {rel} ---\n{snippet}\n")
+        ctx = "\n\nREAD-ONLY CONTEXT FILES:\n" + "\n".join(blocks)
+
+    prompt = (
+        "You are a senior Python engineer. Fix the project so it runs without errors.\n"
+        "Return ONLY valid JSON that matches this schema (no markdown, no commentary):\n"
+        + json.dumps(schema)
+        + hints
+        + ctx
+        + "\n\nSANDBOX ROOT:\n"
+        + sandbox_root
+        + "\n\nERROR LOG:\n"
+        + error_log
+    )
+
+    resp = client.responses.create(
+        model=model,
+        input=[
+            {"role": "system", "content": "You output strict JSON patches for code fixes."},
+            {"role": "user", "content": prompt},
+        ],
+        temperature=0.2,
+        max_output_tokens=3000,
+    )
+
+    text = ""
+    for item in resp.output or []:
+        for c in item.content or []:
+            if getattr(c, "type", None) in ("output_text", "text"):
+                text += getattr(c, "text", "") or ""
+
+    return _strip_code_fences(text)

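Editor's note: the new `fix_code_with_gpt_patch_protocol` asks the model for JSON matching the schema embedded in its prompt, with a SHA-256 digest over the UTF-8 bytes of each file's `new_content`. A minimal sketch of a payload that would satisfy that contract; the path and file contents here are made up for illustration:

```python
import hashlib
import json

# Hypothetical single-file fix the model might return.
new_content = "def add(a, b):\n    return a + b\n"

payload = {
    "files": [
        {
            "path": "app/math_utils.py",  # hypothetical path, relative to the sandbox root
            "new_content": new_content,
            # Digest over the UTF-8 bytes, matching compute_sha256_utf8() in patch_protocol.py
            "sha256": hashlib.sha256(new_content.encode("utf-8")).hexdigest(),
        }
    ]
}

print(json.dumps(payload, indent=2))
```
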
auto_code_fixer/patch_protocol.py
ADDED

@@ -0,0 +1,102 @@
+import hashlib
+import json
+import os
+from dataclasses import dataclass
+
+
+@dataclass(frozen=True)
+class PatchFile:
+    """A single file edit from the structured patch protocol."""
+
+    path: str
+    new_content: str
+    sha256: str
+
+
+def strip_code_fences(text: str) -> str:
+    text = (text or "").strip()
+    if text.startswith("```"):
+        text = "\n".join(text.split("\n")[1:])
+    if text.endswith("```"):
+        text = "\n".join(text.split("\n")[:-1])
+    return text.strip()
+
+
+def compute_sha256_utf8(content: str) -> str:
+    return hashlib.sha256((content or "").encode("utf-8")).hexdigest()
+
+
+def parse_patch_protocol_response(text: str) -> list[PatchFile]:
+    """Parse model output for the patch protocol.
+
+    Expected JSON schema:
+    {"files": [{"path": "...", "new_content": "...", "sha256": "..."}, ...]}
+
+    Raises ValueError on invalid input.
+    """
+
+    cleaned = strip_code_fences(text)
+
+    try:
+        payload = json.loads(cleaned)
+    except Exception as e:  # pragma: no cover
+        raise ValueError(f"Invalid JSON: {e}")
+
+    if not isinstance(payload, dict):
+        raise ValueError("Patch protocol JSON must be an object")
+
+    files = payload.get("files")
+    if not isinstance(files, list) or not files:
+        raise ValueError("Patch protocol JSON must contain non-empty 'files' list")
+
+    out: list[PatchFile] = []
+    for i, f in enumerate(files):
+        if not isinstance(f, dict):
+            raise ValueError(f"files[{i}] must be an object")
+
+        path = f.get("path")
+        new_content = f.get("new_content")
+        sha256 = f.get("sha256")
+
+        if not isinstance(path, str) or not path.strip():
+            raise ValueError(f"files[{i}].path must be a non-empty string")
+        if not isinstance(new_content, str):
+            raise ValueError(f"files[{i}].new_content must be a string")
+        if not isinstance(sha256, str) or len(sha256.strip()) != 64:
+            raise ValueError(f"files[{i}].sha256 must be a 64-char hex string")
+
+        out.append(PatchFile(path=path, new_content=new_content, sha256=sha256.strip().lower()))
+
+    return out
+
+
+def validate_and_resolve_patch_files(
+    patch_files: list[PatchFile], *, sandbox_root: str
+) -> list[tuple[str, str]]:
+    """Validate patch files and return a list of (abs_path, new_content).
+
+    Safety:
+    - paths must be relative and remain within sandbox_root
+    - sha256 must match new_content (utf-8)
+    """
+
+    sr = os.path.realpath(os.path.abspath(sandbox_root))
+    resolved: list[tuple[str, str]] = []
+
+    for pf in patch_files:
+        if os.path.isabs(pf.path):
+            raise ValueError(f"Absolute path not allowed in patch protocol: {pf.path}")
+
+        # Normalize and resolve against sandbox_root
+        abs_path = os.path.realpath(os.path.abspath(os.path.join(sr, pf.path)))
+
+        if not (abs_path.startswith(sr + os.sep) or abs_path == sr):
+            raise ValueError(f"Patch path escapes sandbox root: {pf.path}")
+
+        got = compute_sha256_utf8(pf.new_content)
+        if got != pf.sha256:
+            raise ValueError(f"sha256 mismatch for {pf.path}: expected {pf.sha256}, got {got}")
+
+        resolved.append((abs_path, pf.new_content))
+
+    return resolved

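Editor's note: a quick round trip through the new module, assuming the 0.3.6 wheel is installed so `auto_code_fixer.patch_protocol` is importable; the response string and file name are illustrative stand-ins for model output:

```python
import json
import tempfile

from auto_code_fixer.patch_protocol import (
    compute_sha256_utf8,
    parse_patch_protocol_response,
    validate_and_resolve_patch_files,
)

content = "print('hello')\n"
# Stand-in for model output; markdown fences around the JSON would be removed by strip_code_fences().
raw = json.dumps(
    {"files": [{"path": "main.py", "new_content": content, "sha256": compute_sha256_utf8(content)}]}
)

patch_files = parse_patch_protocol_response(raw)   # -> [PatchFile(path="main.py", ...)]
sandbox = tempfile.mkdtemp()                       # pretend sandbox root
resolved = validate_and_resolve_patch_files(patch_files, sandbox_root=sandbox)
for abs_path, new_content in resolved:             # sha256 and path containment already checked
    print(abs_path, len(new_content))
```

Absolute paths, paths that escape the sandbox root, and hash mismatches all raise `ValueError`, which the cli.py changes above catch in order to fall back to the legacy full-text mode.
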
auto_code_fixer/patcher.py
CHANGED
@@ -1,6 +1,9 @@
 import os
+import tempfile
 from dataclasses import dataclass

+from auto_code_fixer.patch_protocol import compute_sha256_utf8
+

 @dataclass
 class FileEdit:

@@ -8,10 +11,43 @@ class FileEdit:
     new_content: str


+def _atomic_write(path: str, content: str) -> None:
+    """Atomically write text content to path.
+
+    Writes to a temp file in the same directory then os.replace() to avoid partial writes.
+    """
+
+    dir_name = os.path.dirname(path) or "."
+    os.makedirs(dir_name, exist_ok=True)
+
+    fd, tmp_path = tempfile.mkstemp(prefix=".acf_", suffix=".tmp", dir=dir_name)
+    try:
+        with os.fdopen(fd, "w", encoding="utf-8") as f:
+            f.write(content)
+            f.flush()
+            os.fsync(f.fileno())
+        os.replace(tmp_path, path)
+    finally:
+        try:
+            if os.path.exists(tmp_path):
+                os.unlink(tmp_path)
+        except Exception:
+            pass
+
+
+def atomic_write_verified_sha256(path: str, content: str, expected_sha256: str) -> None:
+    """Atomically write text content, verifying sha256(content) before commit."""
+
+    got = compute_sha256_utf8(content)
+    if got != (expected_sha256 or "").strip().lower():
+        raise ValueError(f"sha256 mismatch for {path}: expected {expected_sha256}, got {got}")
+
+    _atomic_write(path, content)
+
+
 def safe_write(path: str, content: str) -> None:
-    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
-    with open(path, "w", encoding="utf-8") as f:
-        f.write(content)
+    # Backwards-compatible name; now atomic.
+    _atomic_write(path, content)


 def safe_read(path: str) -> str:

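Editor's note: both the rewritten `safe_write` and the new `atomic_write_verified_sha256` funnel through `_atomic_write` (temp file in the target directory, then `os.replace`). A small sketch of the verified path, again assuming the installed package, with an illustrative target file; it also assumes `safe_read` returns the file's text, as its call sites in cli.py indicate:

```python
import os
import tempfile

from auto_code_fixer.patch_protocol import compute_sha256_utf8
from auto_code_fixer.patcher import atomic_write_verified_sha256, safe_read

target = os.path.join(tempfile.mkdtemp(), "fixed_module.py")  # hypothetical sandbox file
content = "VALUE = 42\n"

# Commits only if the digest matches; a wrong digest raises ValueError before anything is written.
atomic_write_verified_sha256(target, content, compute_sha256_utf8(content))
print(safe_read(target))

try:
    atomic_write_verified_sha256(target, "tampered\n", compute_sha256_utf8(content))
except ValueError as exc:
    print(f"rejected: {exc}")
```
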
{auto_code_fixer-0.3.4.dist-info → auto_code_fixer-0.3.6.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: auto-code-fixer
-Version: 0.3.4
+Version: 0.3.6
 Summary: Automatically fix Python code using ChatGPT
 Author-email: Arif Shah <ashah7775@gmail.com>
 License: MIT

@@ -121,6 +121,21 @@ auto-code-fixer main.py --ai-plan
 ```
 This enables a helper that can suggest which local file to edit. It is best-effort.

+### Optional structured patch protocol (JSON + sha256)
+```bash
+auto-code-fixer main.py --patch-protocol
+```
+When enabled, the model is asked to return strict JSON with `{files:[{path,new_content,sha256}]}`.
+The tool verifies the SHA-256 hash of `new_content` before applying edits, and falls back to the
+legacy full-text mode if parsing/validation fails.
+
+### Optional formatting / linting (best-effort)
+```bash
+auto-code-fixer main.py --format black
+auto-code-fixer main.py --lint ruff --fix
+```
+These run inside the sandbox venv and are skipped if the tools are not installed.
+
 ---

 ## Environment variables

{auto_code_fixer-0.3.4.dist-info → auto_code_fixer-0.3.6.dist-info}/RECORD
CHANGED

@@ -1,19 +1,20 @@
-auto_code_fixer/__init__.py,sha256=
-auto_code_fixer/cli.py,sha256=
+auto_code_fixer/__init__.py,sha256=W_9dCm49nLvZulVAvvsafxLJjVBSKDBHz9K7szFZllo,22
+auto_code_fixer/cli.py,sha256=lFIGgrMQaqAoNtGRlkKXE1Lzk4l74XwsYjdxnrlF6lI,20304
 auto_code_fixer/command_runner.py,sha256=6P8hGRavN5C39x-e03p02Vc805NnZH9U7e48ngb5jJI,1104
-auto_code_fixer/fixer.py,sha256=
+auto_code_fixer/fixer.py,sha256=zcgw56pRTuOLvna09lTXatD0VWwjjzBVk0OyEKfgxDM,4691
 auto_code_fixer/installer.py,sha256=LC0jasSsPI7eHMeDxa622OoMCR1951HAXUZWp-kcmVY,1522
 auto_code_fixer/models.py,sha256=JLBJutOoiOjjlT_RMPUPhWlmm1yc_nGcQqv5tY72Al0,317
-auto_code_fixer/
+auto_code_fixer/patch_protocol.py,sha256=8l1E9o-3jkO4VAI7Ulrf-1MbAshNzjQXtUkmH-0hYio,3216
+auto_code_fixer/patcher.py,sha256=BcQTnjWazdpuEXyR2AlumFBzIk_yIrO3fGTaIqpHuiU,1811
 auto_code_fixer/plan.py,sha256=jrZdG-f1RDxVB0tBLlTwKbCSEiOYI_RMetdzfBcyE4s,1762
 auto_code_fixer/runner.py,sha256=BvQm3CrwkQEDOw0tpiamSTcdu3OjbOgA801xW2zWdP8,970
 auto_code_fixer/sandbox.py,sha256=FWQcCxNDI4i7ckTKHuARSSIHCopBRqG16MVtx9s75R8,1628
 auto_code_fixer/traceback_utils.py,sha256=sbSuLO-2UBk5QPJZYJunTK9WGOpEY8mxR6WRKbtCIoM,935
 auto_code_fixer/utils.py,sha256=YXCv3PcDo5NBM1odksBTWkHTEELRtEXfPDIORA5iYaM,3090
 auto_code_fixer/venv_manager.py,sha256=2ww8reYgLbLohh-moAD5YKM09qv_mC5yYzJRwm3XiXc,1202
-auto_code_fixer-0.3.
-auto_code_fixer-0.3.
-auto_code_fixer-0.3.
-auto_code_fixer-0.3.
-auto_code_fixer-0.3.
-auto_code_fixer-0.3.
+auto_code_fixer-0.3.6.dist-info/licenses/LICENSE,sha256=hgchJNa26tjXuLztwSUDbYQxNLnAPnLk6kDXNIkC8xc,1066
+auto_code_fixer-0.3.6.dist-info/METADATA,sha256=R6geFjh2cpLvAzuR1KQRcAJyryQgClYYB1wWmD1o2Qk,3870
+auto_code_fixer-0.3.6.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+auto_code_fixer-0.3.6.dist-info/entry_points.txt,sha256=a-j2rkfwkrhXZ5Qbz_6_gwk6Bj7nijYR1DALjWp5Myk,61
+auto_code_fixer-0.3.6.dist-info/top_level.txt,sha256=qUk1qznb6Qxqmxy2A3z_5dpOZlmNKHwUiLuJwH-CrAk,16
+auto_code_fixer-0.3.6.dist-info/RECORD,,