auto-code-fixer 0.3.5__tar.gz → 0.3.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/PKG-INFO +33 -6
  2. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/README.md +32 -5
  3. auto_code_fixer-0.3.7/auto_code_fixer/__init__.py +1 -0
  4. auto_code_fixer-0.3.7/auto_code_fixer/approval.py +114 -0
  5. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/cli.py +240 -10
  6. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/fixer.py +14 -1
  7. auto_code_fixer-0.3.7/auto_code_fixer/patcher.py +67 -0
  8. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer.egg-info/PKG-INFO +33 -6
  9. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer.egg-info/SOURCES.txt +3 -0
  10. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/pyproject.toml +1 -1
  11. auto_code_fixer-0.3.7/tests/test_approval_and_guards.py +55 -0
  12. auto_code_fixer-0.3.7/tests/test_atomic_write.py +34 -0
  13. auto_code_fixer-0.3.5/auto_code_fixer/__init__.py +0 -1
  14. auto_code_fixer-0.3.5/auto_code_fixer/patcher.py +0 -31
  15. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/LICENSE +0 -0
  16. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/command_runner.py +0 -0
  17. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/installer.py +0 -0
  18. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/models.py +0 -0
  19. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/patch_protocol.py +0 -0
  20. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/plan.py +0 -0
  21. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/runner.py +0 -0
  22. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/sandbox.py +0 -0
  23. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/traceback_utils.py +0 -0
  24. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/utils.py +0 -0
  25. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/venv_manager.py +0 -0
  26. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer.egg-info/dependency_links.txt +0 -0
  27. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer.egg-info/entry_points.txt +0 -0
  28. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer.egg-info/requires.txt +0 -0
  29. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer.egg-info/top_level.txt +0 -0
  30. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/setup.cfg +0 -0
  31. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/tests/test_fix_imported_file.py +0 -0
  32. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/tests/test_internal_imports.py +0 -0
  33. {auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/tests/test_patch_protocol.py +0 -0

{auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/PKG-INFO (+33 -6)

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: auto-code-fixer
- Version: 0.3.5
+ Version: 0.3.7
  Summary: Automatically fix Python code using ChatGPT
  Author-email: Arif Shah <ashah7775@gmail.com>
  License: MIT

The remaining PKG-INFO hunks are identical to the README.md changes shown next (PKG-INFO embeds the README as the package long description).

{auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/README.md (+32 -5)

@@ -66,6 +66,18 @@ pip install requests
  Before overwriting any file, it creates a backup:
  - `file.py.bak` (or `.bak1`, `.bak2`, ...)

+ ### Approval mode (diff review)
+ ```bash
+ auto-code-fixer path/to/main.py --project-root . --approve
+ ```
+ In patch-protocol mode, approvals are **file-by-file** (apply/skip).
+
+ ### Diff / size guards
+ To prevent huge edits from being applied accidentally:
+ - `--max-diff-lines` limits unified-diff size per file
+ - `--max-file-bytes` limits the proposed new content size per file
+ - `--max-total-bytes` limits total proposed new content across all files
+
  ### Dry run
  ```bash
  auto-code-fixer path/to/main.py --project-root . --dry-run
@@ -82,6 +94,16 @@ Instead of `python main.py`, run tests:
  auto-code-fixer . --project-root . --run "pytest -q" --no-ask
  ```

+ When you use `--run`, the tool (by default) also performs a **post-apply check**:
+ after copying fixes back to your project, it re-runs the same command against the real project files
+ (using the sandbox venv for dependencies).
+
+ You can disable that extra check with:
+
+ ```bash
+ auto-code-fixer . --project-root . --run "pytest -q" --no-post-apply-check
+ ```
+
  ### Model selection
  ```bash
  export AUTO_CODE_FIXER_MODEL=gpt-4.1-mini
@@ -100,13 +122,18 @@ auto-code-fixer main.py --ai-plan
  ```
  This enables a helper that can suggest which local file to edit. It is best-effort.

- ### Optional structured patch protocol (JSON + sha256)
+ ### Structured patch protocol (JSON + sha256) (default)
+ By default, Auto Code Fixer uses a structured **patch protocol** where the model returns strict JSON:
+
+ `{ "files": [ {"path": "...", "new_content": "...", "sha256": "..."}, ... ] }`
+
+ The tool verifies the SHA-256 hash of `new_content` before applying edits.
+
+ To disable this and use legacy full-text mode only:
+
  ```bash
- auto-code-fixer main.py --patch-protocol
+ auto-code-fixer main.py --legacy-mode
  ```
- When enabled, the model is asked to return strict JSON with `{files:[{path,new_content,sha256}]}`.
- The tool verifies the SHA-256 hash of `new_content` before applying edits, and falls back to the
- legacy full-text mode if parsing/validation fails.

  ### Optional formatting / linting (best-effort)
  ```bash
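
The README section above gives the patch-protocol schema but not how the `sha256` field is derived. As a minimal illustrative sketch (not part of the package), and assuming the hash is the hex digest of the UTF-8 bytes of `new_content` (which is what the verification code in `patcher.py` further down checks), a valid payload entry can be built like this:

```python
# Hypothetical example payload for the patch protocol described above.
import hashlib
import json

new_content = "print('hello')\n"  # made-up replacement file content
payload = {
    "files": [
        {
            "path": "main.py",  # path relative to the sandbox root
            "new_content": new_content,
            # hex digest of the UTF-8 encoding of new_content
            "sha256": hashlib.sha256(new_content.encode("utf-8")).hexdigest(),
        }
    ]
}
print(json.dumps(payload, indent=2))
```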

auto_code_fixer-0.3.7/auto_code_fixer/__init__.py (+1 -0, new file)

@@ -0,0 +1 @@
+ __version__ = "0.3.7"

auto_code_fixer-0.3.7/auto_code_fixer/approval.py (+114 -0, new file)

@@ -0,0 +1,114 @@
+ import difflib
+ from dataclasses import dataclass
+
+
+ @dataclass(frozen=True)
+ class PlannedChange:
+     abs_path: str
+     rel_path: str
+     old_content: str
+     new_content: str
+
+
+ class UserAbort(Exception):
+     pass
+
+
+ def unified_diff_text(rel_path: str, old: str, new: str) -> str:
+     return "".join(
+         difflib.unified_diff(
+             (old or "").splitlines(keepends=True),
+             (new or "").splitlines(keepends=True),
+             fromfile=rel_path + " (before)",
+             tofile=rel_path + " (after)",
+         )
+     )
+
+
+ def guard_planned_changes(
+     planned: list[PlannedChange],
+     *,
+     max_file_bytes: int,
+     max_total_bytes: int,
+     max_diff_lines: int,
+ ) -> None:
+     """Safety guards against huge edits.
+
+     Raises ValueError when limits are exceeded.
+     """
+
+     if max_file_bytes <= 0 or max_total_bytes <= 0 or max_diff_lines <= 0:
+         raise ValueError("Guard limits must be positive")
+
+     total = 0
+
+     for ch in planned:
+         b = len((ch.new_content or "").encode("utf-8"))
+         total += b
+         if b > max_file_bytes:
+             raise ValueError(
+                 f"Proposed content for {ch.rel_path} is {b} bytes, exceeding --max-file-bytes={max_file_bytes}"
+             )
+
+         diff = unified_diff_text(ch.rel_path, ch.old_content, ch.new_content)
+         diff_lines = len(diff.splitlines())
+         if diff_lines > max_diff_lines:
+             raise ValueError(
+                 f"Diff for {ch.rel_path} is {diff_lines} lines, exceeding --max-diff-lines={max_diff_lines}"
+             )
+
+     if total > max_total_bytes:
+         raise ValueError(
+             f"Total proposed content is {total} bytes, exceeding --max-total-bytes={max_total_bytes}"
+         )
+
+
+ def prompt_approve_file_by_file(
+     planned: list[PlannedChange],
+     *,
+     input_fn=input,
+     print_fn=print,
+ ) -> list[PlannedChange]:
+     """Interactive approval per file.
+
+     Commands:
+       y = apply this change
+       n = skip this change
+       a = apply this and all remaining
+       q = abort
+     """
+
+     if not planned:
+         return []
+
+     out: list[PlannedChange] = []
+     apply_all = False
+
+     for ch in planned:
+         diff = unified_diff_text(ch.rel_path, ch.old_content, ch.new_content)
+         print_fn("\nPROPOSED CHANGE:")
+         print_fn(diff)
+
+         if apply_all:
+             out.append(ch)
+             continue
+
+         ans = (
+             input_fn(f"Apply change for {ch.rel_path}? (y/n/a/q): ")
+             .strip()
+             .lower()
+         )
+         if ans == "y":
+             out.append(ch)
+         elif ans == "n":
+             continue
+         elif ans == "a":
+             out.append(ch)
+             apply_all = True
+         elif ans == "q":
+             raise UserAbort("User aborted approvals")
+         else:
+             # default: be safe
+             continue
+
+     return out
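
For orientation, here is a minimal usage sketch of the new approval module, mirroring how `cli.py` calls it; the file paths and contents are invented, and the limit values are simply the CLI defaults introduced in this release:

```python
from auto_code_fixer.approval import (
    PlannedChange,
    UserAbort,
    guard_planned_changes,
    prompt_approve_file_by_file,
)

# A single hypothetical edit: /tmp/sandbox/app.py gets new content.
planned = [
    PlannedChange(
        abs_path="/tmp/sandbox/app.py",
        rel_path="app.py",
        old_content="print(1)\n",
        new_content="print(2)\n",
    )
]

# Reject oversized edits before anything is shown to the user
# (values match the CLI defaults for --max-file-bytes, --max-total-bytes, --max-diff-lines).
guard_planned_changes(
    planned,
    max_file_bytes=200_000,
    max_total_bytes=1_000_000,
    max_diff_lines=4000,
)

try:
    # Prints a unified diff per file and prompts y/n/a/q.
    approved = prompt_approve_file_by_file(planned)
except UserAbort:
    approved = []

print(f"{len(approved)} change(s) approved")
```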

{auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/cli.py (+240 -10)

@@ -32,6 +32,13 @@ def fix_file(
      max_retries: int,
      run_cmd: str | None,
      patch_protocol: bool,
+     max_files_changed: int,
+     context_files: int,
+     approve: bool,
+     max_diff_lines: int,
+     max_file_bytes: int,
+     max_total_bytes: int,
+     post_apply_check: bool,
      fmt: str | None,
      lint: str | None,
      lint_fix: bool,
@@ -160,6 +167,8 @@ def fix_file(
          sr = os.path.realpath(os.path.abspath(sandbox_root))
          pr = os.path.realpath(os.path.abspath(project_root))

+         backups: dict[str, str] = {}
+
          for p in sorted(changed_sandbox_files):
              p_real = os.path.realpath(os.path.abspath(p))

@@ -189,12 +198,44 @@

              if os.path.exists(dst_real):
                  bak = backup_file(dst_real)
+                 backups[dst_real] = bak
                  log(f"Backup created: {bak}", "DEBUG")

              os.makedirs(os.path.dirname(dst_real), exist_ok=True)
              shutil.copy(p_real, dst_real)
              log(f"File updated: {dst_real}")

+         # Optional post-apply verification: run the same command against the real project files,
+         # using the sandbox venv for dependencies.
+         if post_apply_check and run_cmd and changed_sandbox_files:
+             from auto_code_fixer.command_runner import run_command
+
+             rc2, out2, err2 = run_command(
+                 run_cmd,
+                 timeout_s=timeout_s,
+                 python_exe=venv_python,
+                 cwd=project_root,
+                 extra_env={"PYTHONPATH": project_root},
+             )
+             if verbose:
+                 if out2:
+                     log(f"POST-APPLY STDOUT:\n{out2}", "DEBUG")
+                 if err2:
+                     log(f"POST-APPLY STDERR:\n{err2}", "DEBUG")
+
+             if rc2 != 0:
+                 log(
+                     "Post-apply command failed; restoring backups (best-effort).",
+                     "ERROR",
+                 )
+                 for dst_real, bak in backups.items():
+                     try:
+                         shutil.copy(bak, dst_real)
+                     except Exception as e:
+                         log(f"WARN: failed to restore {dst_real} from {bak}: {e}")
+                 cleanup_sandbox()
+                 return False
+
          cleanup_sandbox()
          log(f"Fix completed in {attempt + 1} attempt(s) 🎉")
          return True
@@ -247,34 +288,110 @@ def fix_file(

          if patch_protocol:
              try:
+                 import difflib
+
                  from auto_code_fixer.fixer import fix_code_with_gpt_patch_protocol
                  from auto_code_fixer.patch_protocol import (
                      parse_patch_protocol_response,
                      validate_and_resolve_patch_files,
                  )
-                 from auto_code_fixer.patcher import safe_read, safe_write
+                 from auto_code_fixer.patcher import safe_read, atomic_write_verified_sha256
+                 from auto_code_fixer.utils import find_imports

                  hint_paths = [os.path.relpath(target_file, sandbox_root)]
+
+                 # Add a few related local files as read-only context to reduce back-and-forth.
+                 ctx_pairs: list[tuple[str, str]] = []
+                 if context_files and context_files > 0:
+                     rels: list[str] = []
+                     for abs_p in find_imports(target_file, sandbox_root):
+                         try:
+                             rels.append(os.path.relpath(abs_p, sandbox_root))
+                         except Exception:
+                             continue
+                     # drop duplicates and the target itself
+                     rels = [r for r in rels if r not in hint_paths]
+                     for rel in rels[:context_files]:
+                         abs_p = os.path.join(sandbox_root, rel)
+                         if os.path.exists(abs_p):
+                             try:
+                                 ctx_pairs.append((rel, safe_read(abs_p)))
+                             except Exception:
+                                 pass
+
                  raw = fix_code_with_gpt_patch_protocol(
                      sandbox_root=sandbox_root,
                      error_log=stderr,
                      api_key=api_key,
                      model=model,
                      hint_paths=hint_paths,
+                     context_files=ctx_pairs,
                  )

                  patch_files = parse_patch_protocol_response(raw)
+
+                 if len(patch_files) > max_files_changed:
+                     raise ValueError(
+                         f"Patch wants to change {len(patch_files)} files, exceeding --max-files-changed={max_files_changed}"
+                     )
+
                  resolved = validate_and_resolve_patch_files(patch_files, sandbox_root=sandbox_root)

-                 # Apply patches (only if they change content)
+                 # Prepare diffs / approvals and apply atomically.
+                 sha_by_rel = {pf.path: pf.sha256 for pf in patch_files}
+
+                 from auto_code_fixer.approval import (
+                     PlannedChange,
+                     UserAbort,
+                     guard_planned_changes,
+                     prompt_approve_file_by_file,
+                 )
+
+                 planned: list[PlannedChange] = []
                  for abs_path, new_content in resolved:
                      old = ""
                      if os.path.exists(abs_path):
                          old = safe_read(abs_path)
                      if new_content.strip() == (old or "").strip():
                          continue
-                     safe_write(abs_path, new_content)
-                     changed_sandbox_files.add(os.path.abspath(abs_path))
+                     rel = os.path.relpath(abs_path, sandbox_root)
+                     planned.append(
+                         PlannedChange(
+                             abs_path=abs_path,
+                             rel_path=rel,
+                             old_content=old,
+                             new_content=new_content,
+                         )
+                     )
+
+                 if planned:
+                     if len(planned) > max_files_changed:
+                         raise ValueError(
+                             f"Patch would change {len(planned)} files, exceeding --max-files-changed={max_files_changed}"
+                         )
+
+                     guard_planned_changes(
+                         planned,
+                         max_file_bytes=max_file_bytes,
+                         max_total_bytes=max_total_bytes,
+                         max_diff_lines=max_diff_lines,
+                     )
+
+                     if approve:
+                         try:
+                             planned = prompt_approve_file_by_file(planned)
+                         except UserAbort:
+                             log("User aborted patch application", "WARN")
+                             planned = []
+
+                     for ch in planned:
+                         expected_sha = sha_by_rel.get(ch.rel_path)
+                         if not expected_sha:
+                             # Shouldn't happen; keep safe.
+                             raise ValueError(f"Missing sha256 for {ch.rel_path} in patch protocol payload")
+
+                         atomic_write_verified_sha256(ch.abs_path, ch.new_content, expected_sha)
+                         changed_sandbox_files.add(os.path.abspath(ch.abs_path))
                      applied_any = True

              except Exception as e:
@@ -292,9 +409,48 @@ def fix_file(
              log("GPT returned no changes. Stopping.", "WARN")
              break

-         with open(target_file, "w", encoding="utf-8") as f:
-             f.write(fixed_code)
+         from auto_code_fixer.approval import (
+             PlannedChange,
+             UserAbort,
+             guard_planned_changes,
+             prompt_approve_file_by_file,
+         )
+
+         old = open(target_file, encoding="utf-8").read()
+         rel = os.path.relpath(target_file, sandbox_root)
+
+         planned = [
+             PlannedChange(
+                 abs_path=target_file,
+                 rel_path=rel,
+                 old_content=old,
+                 new_content=fixed_code,
+             )
+         ]
+
+         guard_planned_changes(
+             planned,
+             max_file_bytes=max_file_bytes,
+             max_total_bytes=max_total_bytes,
+             max_diff_lines=max_diff_lines,
+         )
+
+         if approve:
+             try:
+                 planned = prompt_approve_file_by_file(planned)
+             except UserAbort:
+                 log("User declined patch application", "WARN")
+                 cleanup_sandbox()
+                 return False

+         if not planned:
+             log("No legacy changes approved", "WARN")
+             cleanup_sandbox()
+             return False
+
+         from auto_code_fixer.patcher import safe_write
+
+         safe_write(target_file, planned[0].new_content)
          changed_sandbox_files.add(os.path.abspath(target_file))
          applied_any = True

@@ -346,14 +502,74 @@ def main():
      )

      parser.add_argument(
-         "--patch-protocol",
+         "--legacy-mode",
          action="store_true",
          help=(
-             "Optional: ask the model for a strict JSON patch protocol with sha256 verification "
-             "(falls back to full-text mode if parsing/validation fails)"
+             "Disable the default JSON patch protocol and use legacy full-text edit mode only. "
+             "(Not recommended; patch protocol is safer and supports multi-file fixes.)"
          ),
      )

+     parser.add_argument(
+         "--max-files-changed",
+         type=int,
+         default=20,
+         help="Safety guard: maximum number of files the model may change per attempt (default: 20)",
+     )
+
+     parser.add_argument(
+         "--context-files",
+         type=int,
+         default=3,
+         help=(
+             "Include up to N related local files (imports) as read-only context in the LLM prompt "
+             "when using patch protocol (default: 3)"
+         ),
+     )
+
+     parser.add_argument(
+         "--approve",
+         action="store_true",
+         help=(
+             "Show diffs for proposed changes and ask for approval before applying them in the sandbox. "
+             "In patch-protocol mode, approvals are file-by-file."
+         ),
+     )
+
+     parser.add_argument(
+         "--max-diff-lines",
+         type=int,
+         default=4000,
+         help="Safety guard: maximum unified-diff lines to display/apply per file (default: 4000)",
+     )
+     parser.add_argument(
+         "--max-file-bytes",
+         type=int,
+         default=200_000,
+         help="Safety guard: maximum size (bytes) of proposed new content per file (default: 200000)",
+     )
+     parser.add_argument(
+         "--max-total-bytes",
+         type=int,
+         default=1_000_000,
+         help="Safety guard: maximum total bytes of proposed new content across all files (default: 1000000)",
+     )
+
+     post_apply_group = parser.add_mutually_exclusive_group()
+     post_apply_group.add_argument(
+         "--post-apply-check",
+         action="store_true",
+         help=(
+             "If using --run, re-run the command against the real project files after applying fixes "
+             "(uses sandbox venv for deps)."
+         ),
+     )
+     post_apply_group.add_argument(
+         "--no-post-apply-check",
+         action="store_true",
+         help="Disable the post-apply re-run check (even if --run is set).",
+     )
+
      parser.add_argument(
          "--format",
          default=None,
@@ -415,6 +631,13 @@ def main():
      if args.ai_plan:
          os.environ["AUTO_CODE_FIXER_AI_PLAN"] = "1"

+     if args.no_post_apply_check:
+         post_apply_check = False
+     elif args.post_apply_check:
+         post_apply_check = True
+     else:
+         post_apply_check = bool(args.run)
+
      ok = fix_file(
          args.entry_file,
          args.project_root,
@@ -426,7 +649,14 @@ def main():
          timeout_s=args.timeout,
          max_retries=args.max_retries,
          run_cmd=args.run,
-         patch_protocol=args.patch_protocol,
+         patch_protocol=not args.legacy_mode,
+         max_files_changed=args.max_files_changed,
+         context_files=args.context_files,
+         approve=args.approve,
+         max_diff_lines=args.max_diff_lines,
+         max_file_bytes=args.max_file_bytes,
+         max_total_bytes=args.max_total_bytes,
+         post_apply_check=post_apply_check,
          fmt=args.format,
          lint=args.lint,
          lint_fix=args.fix,
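
A small standalone sketch of the flag resolution that `main()` performs above, isolated so the default is easy to see: the post-apply check turns on automatically whenever `--run` is given, unless `--no-post-apply-check` overrides it. The parser here is a stripped-down stand-in, not the real CLI:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--run", default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument("--post-apply-check", action="store_true")
group.add_argument("--no-post-apply-check", action="store_true")

for argv in ([], ["--run", "pytest -q"], ["--run", "pytest -q", "--no-post-apply-check"]):
    args = parser.parse_args(argv)
    if args.no_post_apply_check:
        post_apply_check = False
    elif args.post_apply_check:
        post_apply_check = True
    else:
        post_apply_check = bool(args.run)  # default: on only when --run is used
    print(argv, "->", post_apply_check)
```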

{auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer/fixer.py (+14 -1)

@@ -86,12 +86,13 @@ def fix_code_with_gpt_patch_protocol(
      api_key: str | None = None,
      model: str | None = None,
      hint_paths: list[str] | None = None,
+     context_files: list[tuple[str, str]] | None = None,
  ) -> str:
      """Ask the model for structured edits (patch protocol).

      Returns raw model text (expected JSON). Parsing/validation happens elsewhere.

-     The protocol is optional and should be enabled explicitly by the caller.
+     context_files: optional list of (relative_path, content) to include as read-only context.
      """

      client = get_openai_client(api_key)
@@ -113,11 +114,23 @@
          f"- {p}" for p in hint_paths
      )

+     ctx = ""
+     if context_files:
+         # Keep it readable and avoid enormous prompts.
+         blocks: list[str] = []
+         for rel, content in context_files:
+             snippet = content
+             if len(snippet) > 4000:
+                 snippet = snippet[:4000] + "\n... (truncated)"
+             blocks.append(f"--- FILE: {rel} ---\n{snippet}\n")
+         ctx = "\n\nREAD-ONLY CONTEXT FILES:\n" + "\n".join(blocks)
+
      prompt = (
          "You are a senior Python engineer. Fix the project so it runs without errors.\n"
          "Return ONLY valid JSON that matches this schema (no markdown, no commentary):\n"
          + json.dumps(schema)
          + hints
+         + ctx
          + "\n\nSANDBOX ROOT:\n"
          + sandbox_root
          + "\n\nERROR LOG:\n"
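
The context block added above caps each read-only file at 4000 characters before it is appended to the prompt. A brief sketch isolating just that truncation step (the oversized file content is invented):

```python
# Mirrors the truncation logic added to fix_code_with_gpt_patch_protocol above.
context_files = [("utils.py", "x = 1\n" * 3000)]  # ~18 KB of made-up content

blocks: list[str] = []
for rel, content in context_files:
    snippet = content
    if len(snippet) > 4000:
        snippet = snippet[:4000] + "\n... (truncated)"
    blocks.append(f"--- FILE: {rel} ---\n{snippet}\n")

ctx = "\n\nREAD-ONLY CONTEXT FILES:\n" + "\n".join(blocks)
print(len(ctx))  # bounded regardless of the original file size
```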

auto_code_fixer-0.3.7/auto_code_fixer/patcher.py (+67 -0, new file)

@@ -0,0 +1,67 @@
+ import os
+ import tempfile
+ from dataclasses import dataclass
+
+ from auto_code_fixer.patch_protocol import compute_sha256_utf8
+
+
+ @dataclass
+ class FileEdit:
+     path: str
+     new_content: str
+
+
+ def _atomic_write(path: str, content: str) -> None:
+     """Atomically write text content to path.
+
+     Writes to a temp file in the same directory then os.replace() to avoid partial writes.
+     """
+
+     dir_name = os.path.dirname(path) or "."
+     os.makedirs(dir_name, exist_ok=True)
+
+     fd, tmp_path = tempfile.mkstemp(prefix=".acf_", suffix=".tmp", dir=dir_name)
+     try:
+         with os.fdopen(fd, "w", encoding="utf-8") as f:
+             f.write(content)
+             f.flush()
+             os.fsync(f.fileno())
+         os.replace(tmp_path, path)
+     finally:
+         try:
+             if os.path.exists(tmp_path):
+                 os.unlink(tmp_path)
+         except Exception:
+             pass
+
+
+ def atomic_write_verified_sha256(path: str, content: str, expected_sha256: str) -> None:
+     """Atomically write text content, verifying sha256(content) before commit."""
+
+     got = compute_sha256_utf8(content)
+     if got != (expected_sha256 or "").strip().lower():
+         raise ValueError(f"sha256 mismatch for {path}: expected {expected_sha256}, got {got}")
+
+     _atomic_write(path, content)
+
+
+ def safe_write(path: str, content: str) -> None:
+     # Backwards-compatible name; now atomic.
+     _atomic_write(path, content)
+
+
+ def safe_read(path: str) -> str:
+     with open(path, "r", encoding="utf-8") as f:
+         return f.read()
+
+
+ def backup_file(path: str) -> str:
+     bak = path + ".bak"
+     # Avoid overwriting an existing bak
+     i = 1
+     while os.path.exists(bak):
+         bak = f"{path}.bak{i}"
+         i += 1
+     with open(path, "rb") as src, open(bak, "wb") as dst:
+         dst.write(src.read())
+     return bak
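
A short usage sketch of the new write path (the target path and content are illustrative; `compute_sha256_utf8` is the helper imported from `patch_protocol` above):

```python
from auto_code_fixer.patch_protocol import compute_sha256_utf8
from auto_code_fixer.patcher import atomic_write_verified_sha256

new_content = "print('fixed')\n"
expected = compute_sha256_utf8(new_content)  # normally taken from the model's JSON payload

# A mismatching hash raises ValueError before any bytes are written; on success
# the content goes to a temp file in the same directory and is swapped in with
# os.replace(), so a crash mid-write cannot leave a half-written file behind.
atomic_write_verified_sha256("/tmp/sandbox/app.py", new_content, expected)
```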

{auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer.egg-info/PKG-INFO (+33 -6)

Identical to the PKG-INFO changes at the top of this diff: the version bump from 0.3.5 to 0.3.7 plus the same README additions.

{auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/auto_code_fixer.egg-info/SOURCES.txt (+3 -0)

@@ -2,6 +2,7 @@ LICENSE
  README.md
  pyproject.toml
  auto_code_fixer/__init__.py
+ auto_code_fixer/approval.py
  auto_code_fixer/cli.py
  auto_code_fixer/command_runner.py
  auto_code_fixer/fixer.py
@@ -21,6 +22,8 @@ auto_code_fixer.egg-info/dependency_links.txt
  auto_code_fixer.egg-info/entry_points.txt
  auto_code_fixer.egg-info/requires.txt
  auto_code_fixer.egg-info/top_level.txt
+ tests/test_approval_and_guards.py
+ tests/test_atomic_write.py
  tests/test_fix_imported_file.py
  tests/test_internal_imports.py
  tests/test_patch_protocol.py

{auto_code_fixer-0.3.5 → auto_code_fixer-0.3.7}/pyproject.toml (+1 -1)

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "auto-code-fixer"
- version = "0.3.5"
+ version = "0.3.7"
  description = "Automatically fix Python code using ChatGPT"
  readme = "README.md"


auto_code_fixer-0.3.7/tests/test_approval_and_guards.py (+55 -0, new file)

@@ -0,0 +1,55 @@
+ import pytest
+
+ from auto_code_fixer.approval import (
+     PlannedChange,
+     UserAbort,
+     guard_planned_changes,
+     prompt_approve_file_by_file,
+ )
+
+
+ def test_guard_planned_changes_rejects_big_file():
+     planned = [
+         PlannedChange(
+             abs_path="/tmp/a.py",
+             rel_path="a.py",
+             old_content="",
+             new_content="x" * 11,
+         )
+     ]
+     with pytest.raises(ValueError, match="max-file-bytes"):
+         guard_planned_changes(planned, max_file_bytes=10, max_total_bytes=100, max_diff_lines=100)
+
+
+ def test_guard_planned_changes_rejects_total_bytes():
+     planned = [
+         PlannedChange("/tmp/a.py", "a.py", "", "x" * 6),
+         PlannedChange("/tmp/b.py", "b.py", "", "x" * 6),
+     ]
+     with pytest.raises(ValueError, match="max-total-bytes"):
+         guard_planned_changes(planned, max_file_bytes=100, max_total_bytes=10, max_diff_lines=100)
+
+
+ def test_prompt_approve_file_by_file_apply_skip():
+     planned = [
+         PlannedChange("/tmp/a.py", "a.py", "print(1)\n", "print(2)\n"),
+         PlannedChange("/tmp/b.py", "b.py", "print(1)\n", "print(3)\n"),
+     ]
+
+     answers = iter(["y", "n"])
+
+     def _input(_prompt: str) -> str:
+         return next(answers)
+
+     chosen = prompt_approve_file_by_file(planned, input_fn=_input, print_fn=lambda *_: None)
+     assert [c.rel_path for c in chosen] == ["a.py"]
+
+
+ def test_prompt_approve_file_by_file_abort():
+     planned = [PlannedChange("/tmp/a.py", "a.py", "", "x")]
+
+     def _input(_prompt: str) -> str:
+         return "q"
+
+     with pytest.raises(UserAbort):
+         prompt_approve_file_by_file(planned, input_fn=_input, print_fn=lambda *_: None)

auto_code_fixer-0.3.7/tests/test_atomic_write.py (+34 -0, new file)

@@ -0,0 +1,34 @@
+ import os
+ from pathlib import Path
+
+ import pytest
+
+ from auto_code_fixer.patcher import atomic_write_verified_sha256, safe_read, safe_write
+ from auto_code_fixer.patch_protocol import compute_sha256_utf8
+
+
+ def test_safe_write_is_atomic_and_writes_content(tmp_path: Path):
+     p = tmp_path / "a.txt"
+     safe_write(str(p), "hello")
+     assert safe_read(str(p)) == "hello"
+
+
+ def test_atomic_write_verified_sha256_rejects_mismatch_and_does_not_modify(tmp_path: Path):
+     p = tmp_path / "a.txt"
+     p.write_text("old", encoding="utf-8")
+
+     with pytest.raises(ValueError, match="sha256 mismatch"):
+         atomic_write_verified_sha256(str(p), "new", expected_sha256="0" * 64)
+
+     assert p.read_text(encoding="utf-8") == "old"
+
+
+ def test_atomic_write_verified_sha256_accepts_and_modifies(tmp_path: Path):
+     p = tmp_path / "a.txt"
+     p.write_text("old", encoding="utf-8")
+
+     content = "new content\n"
+     sha = compute_sha256_utf8(content)
+     atomic_write_verified_sha256(str(p), content, expected_sha256=sha)
+
+     assert p.read_text(encoding="utf-8") == content

auto_code_fixer-0.3.5/auto_code_fixer/__init__.py (+0 -1, removed)

@@ -1 +0,0 @@
- __version__ = "0.3.5"

auto_code_fixer-0.3.5/auto_code_fixer/patcher.py (+0 -31, removed)

@@ -1,31 +0,0 @@
- import os
- from dataclasses import dataclass
-
-
- @dataclass
- class FileEdit:
-     path: str
-     new_content: str
-
-
- def safe_write(path: str, content: str) -> None:
-     os.makedirs(os.path.dirname(path), exist_ok=True)
-     with open(path, "w", encoding="utf-8") as f:
-         f.write(content)
-
-
- def safe_read(path: str) -> str:
-     with open(path, "r", encoding="utf-8") as f:
-         return f.read()
-
-
- def backup_file(path: str) -> str:
-     bak = path + ".bak"
-     # Avoid overwriting an existing bak
-     i = 1
-     while os.path.exists(bak):
-         bak = f"{path}.bak{i}"
-         i += 1
-     with open(path, "rb") as src, open(bak, "wb") as dst:
-         dst.write(src.read())
-     return bak
The remaining files (items 15-33 in the list above) are unchanged between 0.3.5 and 0.3.7.