patch-fixer 0.3.3-py3-none-any.whl → 0.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- patch_fixer/cli.py +16 -1
- patch_fixer/patch_fixer.py +252 -52
- {patch_fixer-0.3.3.dist-info → patch_fixer-0.4.0.dist-info}/METADATA +21 -2
- patch_fixer-0.4.0.dist-info/RECORD +10 -0
- patch_fixer-0.3.3.dist-info/RECORD +0 -10
- {patch_fixer-0.3.3.dist-info → patch_fixer-0.4.0.dist-info}/WHEEL +0 -0
- {patch_fixer-0.3.3.dist-info → patch_fixer-0.4.0.dist-info}/entry_points.txt +0 -0
- {patch_fixer-0.3.3.dist-info → patch_fixer-0.4.0.dist-info}/licenses/LICENSE +0 -0
- {patch_fixer-0.3.3.dist-info → patch_fixer-0.4.0.dist-info}/top_level.txt +0 -0
patch_fixer/cli.py
CHANGED
@@ -14,7 +14,12 @@ def fix_command(args):
     with open(args.broken_patch, encoding='utf-8') as f:
         patch_lines = f.readlines()
 
-    fixed_lines = fix_patch(patch_lines, args.original)
+    fixed_lines = fix_patch(
+        patch_lines,
+        args.original,
+        fuzzy=args.fuzzy,
+        add_newline=args.add_newline
+    )
 
     with open(args.output, 'w', encoding='utf-8') as f:
         f.writelines(fixed_lines)
@@ -77,6 +82,16 @@ def main():
         'output',
         help='Path where the fixed patch will be written'
     )
+    fix_parser.add_argument(
+        '--fuzzy',
+        action='store_true',
+        help='Enable fuzzy string matching when finding hunks in original files'
+    )
+    fix_parser.add_argument(
+        '--add-newline',
+        action='store_true',
+        help='Add final newline when processing "No newline at end of file" markers'
+    )
 
     # split command
     split_parser = subparsers.add_parser(
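Both new flags are plain `store_true` booleans, so they default to False, and argparse converts the dash in `--add-newline` to an underscore on the parsed namespace, which is what lets `fix_command` read `args.add_newline` above. A standalone sketch of that behavior (illustrative, not package code):

```python
import argparse

# mirror the two flags registered on the fix subparser in the diff above
parser = argparse.ArgumentParser()
parser.add_argument('--fuzzy', action='store_true')
parser.add_argument('--add-newline', action='store_true')

args = parser.parse_args(['--fuzzy'])
# argparse exposes '--add-newline' as the attribute 'add_newline'
print(args.fuzzy, args.add_newline)  # True False
```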
patch_fixer/patch_fixer.py
CHANGED
@@ -2,6 +2,7 @@
 import os
 import re
 import sys
+import warnings
 from pathlib import Path
 
 from git import Repo
@@ -16,13 +17,49 @@ regexes = {
     "RENAME_TO": re.compile(rf'rename to ({path_regex})'),
     "FILE_HEADER_START": re.compile(rf'--- (a/{path_regex}|/dev/null)'),
     "FILE_HEADER_END": re.compile(rf'\+\+\+ (b/{path_regex}|/dev/null)'),
-    "HUNK_HEADER": re.compile(r'^@@ -(\d+)
+    "HUNK_HEADER": re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)$'),
     "END_LINE": re.compile(r'\\ No newline at end of file'),
 }
 
 
-class MissingHunkError(Exception):
-    pass
+class HunkErrorBase(Exception):
+    def __init__(self, hunk_lines, file="(unknown file)"):
+        super().__init__()
+        self.hunk = "".join(hunk_lines)
+        self.file = file
+
+    def format_hunk_for_error(self):
+        """Format hunk for error messages, showing only context and deletion lines."""
+        error_lines = []
+        for line in self.hunk.splitlines(keepends=True):
+            if line.startswith((' ', '-')):  # context or deletion lines
+                error_lines.append(line)
+        # skip addition lines (+) as they shouldn't be in the original file
+        return ''.join(error_lines)
+
+    def add_file(self, file):
+        self.file = file
+
+
+class MissingHunkError(HunkErrorBase):
+    def __str__(self):
+        return (f"Could not find hunk in {self.file}:"
+                f"\n================================"
+                f"\n{self.format_hunk_for_error()}"
+                f"================================")
+
+
+class OutOfOrderHunk(HunkErrorBase):
+    def __init__(self, hunk_lines, prev_header, file="(unknown file)"):
+        super().__init__(hunk_lines, file)
+        self.prev_header = prev_header
+
+    def __str__(self):
+        return (f"Out of order hunk in {self.file}:"
+                f"\n==============================="
+                f"\n{self.format_hunk_for_error()}"
+                f"==============================="
+                f"\nOccurs before previous hunk with header {self.prev_header}")
 
 
 class BadCarriageReturn(ValueError):
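The rewritten HUNK_HEADER pattern makes both line counts optional, so the condensed headers git emits for single-line hunks now parse, and group 5 carries any trailing context. A quick illustrative check (not package code):

```python
import re

HUNK_HEADER = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)$')

print(HUNK_HEADER.match('@@ -14,7 +14,12 @@ def fix_command(args):').groups())
# ('14', '7', '14', '12', ' def fix_command(args):')

# the counts are optional, so condensed single-line hunk headers also match
print(HUNK_HEADER.match('@@ -3 +3 @@').groups())
# ('3', None, '3', None, '')
```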
@@ -61,11 +98,37 @@ def normalize_line(line):
     return core + "\n"
 
 
-def find_hunk_start(context_lines, original_lines):
+def fuzzy_line_similarity(line1, line2, threshold=0.8):
+    """Calculate similarity between two lines using a simple ratio."""
+    l1, l2 = line1.strip(), line2.strip()
+
+    # empty strings are identical
+    if len(l1) == 0 and len(l2) == 0:
+        return 1.0
+
+    if l1 == l2:
+        return 1.0
+
+    if len(l1) == 0 or len(l2) == 0:
+        return 0.0
+
+    # count common characters
+    common = 0
+    for char in set(l1) & set(l2):
+        common += min(l1.count(char), l2.count(char))
+
+    total_chars = len(l1) + len(l2)
+    return (2.0 * common) / total_chars if total_chars > 0 else 0.0
+
+
+def find_hunk_start(context_lines, original_lines, fuzzy=False):
     """Search original_lines for context_lines and return start line index (0-based)."""
     ctx = []
     for line in context_lines:
-        if line.startswith(" "):
+        if regexes["END_LINE"].match(line):
+            # "\ No newline at end of file" is just git metadata; skip
+            continue
+        elif line.startswith(" "):
             ctx.append(line.lstrip(" "))
         elif line.startswith("-"):
             # can't use lstrip; we want to keep other dashes in the line
@@ -74,12 +137,47 @@ def find_hunk_start(context_lines, original_lines):
             ctx.append(line)
     if not ctx:
         raise ValueError("Cannot search for empty hunk.")
+
+    # first try exact matching
     for i in range(len(original_lines) - len(ctx) + 1):
         # this part will fail if the diff is malformed beyond hunk header
-        equal_lines = [original_lines[i+j].strip() == ctx[j].strip() for j in range(len(ctx))]
+        equal_lines = [original_lines[i + j].strip() == ctx[j].strip() for j in range(len(ctx))]
         if all(equal_lines):
             return i
-
+
+    # try with more flexible whitespace matching
+    for i in range(len(original_lines) - len(ctx) + 1):
+        equal_lines = []
+        for j in range(len(ctx)):
+            orig_line = original_lines[i + j].strip()
+            ctx_line = ctx[j].strip()
+            # normalize whitespace: convert multiple spaces/tabs to single space
+            orig_normalized = ' '.join(orig_line.split())
+            ctx_normalized = ' '.join(ctx_line.split())
+            equal_lines.append(orig_normalized == ctx_normalized)
+        if all(equal_lines):
+            return i
+
+    # if fuzzy matching is enabled and exact match failed, try fuzzy match
+    if fuzzy:
+        best_match_score = 0.0
+        best_match_pos = 0
+
+        for i in range(len(original_lines) - len(ctx) + 1):
+            total_similarity = 0.0
+            for j in range(len(ctx)):
+                similarity = fuzzy_line_similarity(original_lines[i + j], ctx[j])
+                total_similarity += similarity
+
+            avg_similarity = total_similarity / len(ctx)
+            if avg_similarity > best_match_score and avg_similarity > 0.6:
+                best_match_score = avg_similarity
+                best_match_pos = i
+
+        if best_match_score > 0.6:
+            return best_match_pos
+
+    raise MissingHunkError(context_lines)
 
 
 def match_line(line):
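For intuition, the score `fuzzy_line_similarity()` computes is a Dice coefficient over character counts: lines with identical character multisets score 1.0 regardless of ordering, while mostly disjoint lines score near 0. A condensed restatement, for illustration only:

```python
def char_overlap_score(line1, line2):
    # same character-counting ratio as fuzzy_line_similarity above
    l1, l2 = line1.strip(), line2.strip()
    if l1 == l2:
        return 1.0
    if not l1 or not l2:
        return 0.0
    common = sum(min(l1.count(c), l2.count(c)) for c in set(l1) & set(l2))
    return (2.0 * common) / (len(l1) + len(l2))

print(char_overlap_score("return x + y", "return y + x"))  # 1.0 (reordered chars)
print(char_overlap_score("return x + y", "raise Error"))   # ~0.35 (mostly disjoint)
```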
@@ -111,24 +209,76 @@ def reconstruct_file_header(diff_line, header_type):
         raise ValueError(f"Unsupported header type: {header_type}")
 
 
-def capture_hunk(current_hunk, original_lines, offset, last_hunk, hunk_context):
+def find_all_hunk_starts(hunk_lines, search_lines, fuzzy=False):
+    """Return all line indices in search_lines where this hunk matches."""
+    matches = []
+    start = 0
+    while True:
+        try:
+            idx = find_hunk_start(hunk_lines, search_lines[start:], fuzzy=fuzzy)
+            matches.append(start + idx)
+            start += idx + 1
+        except MissingHunkError:
+            break
+    return matches
+
+
+def capture_hunk(current_hunk, original_lines, offset, last_hunk, old_header, fuzzy=False):
+    """
+    Try to locate the hunk's true position in the original file.
+    If multiple possible matches exist, pick the one closest to the expected
+    (possibly corrupted) line number derived from the old hunk header.
+    """
+    # extract needed info from old header match groups
+    expected_old_start = int(old_header[0]) if old_header else 0
+    try:
+        hunk_context = old_header[4]
+    except IndexError:
+        hunk_context = ""
+
     # compute line counts
     old_count = sum(1 for l in current_hunk if l.startswith((' ', '-')))
     new_count = sum(1 for l in current_hunk if l.startswith((' ', '+')))
 
     if old_count > 0:
-
-
-
-        #
-
-
+        search_index = last_hunk
+        search_lines = original_lines[search_index:]
+
+        # gather *all* possible matches
+        matches = find_all_hunk_starts(current_hunk, search_lines, fuzzy=fuzzy)
+        if matches:
+            # rebase to file line numbers (1-indexed later)
+            candidate_positions = [m + search_index for m in matches]
+
+            if expected_old_start:
+                # choose the one closest to the expected position
+                old_start = min(
+                    candidate_positions,
+                    key=lambda pos: abs(pos + 1 - expected_old_start),
+                ) + 1  # convert to 1-indexed
+            else:
+                # pick first match if no expected line info
+                old_start = candidate_positions[0] + 1
         else:
-
-
-
+            # try from start of file as fallback
+            matches = find_all_hunk_starts(current_hunk, original_lines, fuzzy=fuzzy)
+            if not matches:
+                raise MissingHunkError(current_hunk)
+            if expected_old_start:
+                old_start = (
+                    min(matches, key=lambda pos: abs(pos + 1 - expected_old_start)) + 1
+                )
             else:
-
+                old_start = matches[0] + 1
+
+        if old_start < last_hunk + 1:
+            raise OutOfOrderHunk(current_hunk, original_lines[last_hunk])
+
+        if new_count == 0:
+            # complete deletion of remaining content
+            new_start = 0
+        else:
+            new_start = old_start + offset
     else:
         # old count of zero can only mean file creation, since adding lines to
         # an existing file requires surrounding context lines without a +
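The selection rule above is worth spelling out: when a hunk's context matches in several places, `capture_hunk` trusts the broken header's claimed start line just enough to break the tie. A toy illustration with made-up numbers:

```python
candidate_positions = [4, 57, 210]  # hypothetical 0-based match indices
expected_old_start = 60             # 1-based start claimed by the old header

# pick the candidate whose 1-based position is nearest the claimed start
old_start = min(candidate_positions,
                key=lambda pos: abs(pos + 1 - expected_old_start)) + 1
print(old_start)  # 58: the match at index 57 wins over 4 and 210
```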
@@ -137,17 +287,43 @@ def capture_hunk(current_hunk, original_lines, offset, last_hunk, hunk_context):
 
     offset += (new_count - old_count)
 
-    last_hunk
+    last_hunk += (old_start - last_hunk)
 
-    #
-
+    # use condensed header if it's only one line
+    old_part = f"{old_start},{old_count}" if old_count != 1 else f"{old_start}"
+    new_part = f"{new_start},{new_count}" if new_count != 1 else f"{new_start}"
+
+    fixed_header = f"@@ -{old_part} +{new_part} @@{hunk_context}\n"
 
     return fixed_header, offset, last_hunk
 
 
+def read_file_with_fallback_encoding(file_path):
+    """Read file with UTF-8, falling back to other encodings if needed."""
+    encodings = ['utf-8', 'latin-1', 'cp1252', 'iso-8859-1']
+
+    for encoding in encodings:
+        try:
+            with open(file_path, 'r', encoding=encoding) as f:
+                return f.readlines()
+        except UnicodeDecodeError:
+            continue
+
+    # If all encodings fail, read as binary and replace problematic characters
+    with open(file_path, 'rb') as f:
+        content = f.read()
+    # Decode with UTF-8, replacing errors
+    text_content = content.decode('utf-8', errors='replace')
+    return text_content.splitlines(keepends=True)
+
+
 def regenerate_index(old_path, new_path, cur_dir):
     repo = Repo(cur_dir)
-
+
+    # Common git file modes: 100644 (regular file), 100755 (executable file),
+    # 120000 (symbolic link), 160000 (submodule), 040000 (tree/directory)
+    # TODO: guess mode based on above information
+    mode = " 100644"
 
     # file deletion
     if new_path == "/dev/null":
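The fallback loop works because each attempted codec either decodes the whole file or raises UnicodeDecodeError; latin-1 in particular maps every byte value, so in practice the final replace branch is defensive. A small illustrative demo (not package code):

```python
# bytes that are invalid UTF-8 but valid latin-1
data = 'café'.encode('latin-1')  # b'caf\xe9'

for encoding in ['utf-8', 'latin-1', 'cp1252', 'iso-8859-1']:
    try:
        text = data.decode(encoding)
        break  # utf-8 raises UnicodeDecodeError; latin-1 succeeds
    except UnicodeDecodeError:
        continue
print(text)  # café, recovered on the second attempt
```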
@@ -164,12 +340,15 @@ def regenerate_index(old_path, new_path, cur_dir):
     return f"index {old_sha}..{new_sha}{mode}\n"
 
 
-def fix_patch(patch_lines, original, remove_binary=False):
+def fix_patch(patch_lines, original, remove_binary=False, fuzzy=False, add_newline=False):
     dir_mode = os.path.isdir(original)
     original_path = Path(original).absolute()
 
     # make relative paths in the diff work
-
+    if dir_mode:
+        os.chdir(original_path)
+    else:
+        os.chdir(original_path.parent)
 
     fixed_lines = []
     current_hunk = []
@@ -186,7 +365,7 @@ def fix_patch(patch_lines, original, remove_binary=False):
     similarity_index = None
     missing_index = False
     binary_file = False
-
+    current_hunk_header = ()
     original_lines = []
     file_loaded = False
 
@@ -201,10 +380,10 @@ def fix_patch(patch_lines, original, remove_binary=False):
                         fixed_header,
                         offset,
                         last_hunk
-                    ) = capture_hunk(current_hunk, original_lines, offset, last_hunk, hunk_context)
-                except MissingHunkError:
-
-
+                    ) = capture_hunk(current_hunk, original_lines, offset, last_hunk, current_hunk_header, fuzzy=fuzzy)
+                except (MissingHunkError, OutOfOrderHunk) as e:
+                    e.add_file(current_file)
+                    raise e
                 fixed_lines.append(fixed_header)
                 fixed_lines.extend(current_hunk)
                 current_hunk = []
@@ -224,7 +403,12 @@ def fix_patch(patch_lines, original, remove_binary=False):
                 last_mode = i
                 fixed_lines.append(normalize_line(line))
             case "INDEX_LINE":
-                #
+                # mode should be present in index line for all operations except file deletion
+                # for deletions, the mode is omitted since the file no longer exists
+                index_line = normalize_line(line).strip()
+                if not index_line.endswith("..0000000") and not re.search(r' [0-7]{6}$', index_line):
+                    # TODO: this is the right idea, but a poor implementation
+                    pass
                 last_index = i
                 similarity_index = match_groups[0]
                 if similarity_index:
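The new INDEX_LINE check leans on the shape of git index lines: `index <old>..<new> <mode>` for modifications, with the mode omitted when the new blob is all zeros (a deletion). An illustrative check of the same regex (the hashes below are made up):

```python
import re

# a modification carries a six-digit octal mode; a deletion does not
has_mode = re.search(r' [0-7]{6}$', 'index 3f9b1c2..84a0d11 100644')
no_mode = re.search(r' [0-7]{6}$', 'index 3f9b1c2..0000000')
print(bool(has_mode), bool(no_mode))  # True False
```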
@@ -238,7 +422,9 @@ def fix_patch(patch_lines, original, remove_binary=False):
                 fixed_lines.append(normalize_line(line))
             case "RENAME_FROM":
                 if not look_for_rename:
-
+                    # handle case where rename from appears without corresponding index line
+                    # this may indicate a malformed patch, but we can try to continue
+                    warnings.warn(f"Warning: 'rename from' found without expected index line at line {i+1}")
                 if binary_file:
                     raise NotImplementedError("Renaming binary files not yet supported")
                 if last_index != i - 1:
@@ -252,7 +438,10 @@ def fix_patch(patch_lines, original, remove_binary=False):
                 offset = 0
                 last_hunk = 0
                 if not Path.exists(current_path):
-                    #
+                    # this is meant to handle cases where the source file
+                    # doesn't exist (e.g., when applying a patch that renames
+                    # a file created earlier in the same patch)
+                    # TODO: but really, does that ever happen???
                     fixed_lines.append(normalize_line(line))
                     look_for_rename = True
                     file_loaded = False
@@ -260,8 +449,8 @@ def fix_patch(patch_lines, original, remove_binary=False):
                 if not current_path.is_file():
                     raise IsADirectoryError(f"Rename from header points to a directory, not a file: {current_file}")
                 if dir_mode or current_path == original_path:
-
-
+                    file_lines = read_file_with_fallback_encoding(current_path)
+                    original_lines = [l.rstrip('\n') for l in file_lines]
                     fixed_lines.append(normalize_line(line))
                     file_loaded = True
                 else:
@@ -273,7 +462,12 @@ def fix_patch(patch_lines, original, remove_binary=False):
                     last_index = i - 2
                 else:
                     raise NotImplementedError("Missing `rename from` header not yet supported.")
-
+                if not look_for_rename:
+                    # if we're not looking for a rename but encounter "rename to",
+                    # this indicates a malformed patch - log warning but continue
+                    warnings.warn(
+                        f"Warning: unexpected 'rename to' found at line {i + 1} without corresponding 'rename from'"
+                    )
                 current_file = match_groups[0]
                 current_path = Path(current_file).absolute()
                 if current_file and current_path.is_dir():
@@ -315,8 +509,8 @@ def fix_patch(patch_lines, original, remove_binary=False):
                     raise IsADirectoryError(f"File header start points to a directory, not a file: {current_file}")
                 if not file_loaded:
                     if dir_mode or Path(current_file) == Path(original):
-
-
+                        file_lines = read_file_with_fallback_encoding(current_path)
+                        original_lines = [l.rstrip('\n') for l in file_lines]
                         file_loaded = True
                     else:
                         raise FileNotFoundError(f"Filename {current_file} in header does not match argument {original}")
@@ -404,7 +598,7 @@ def fix_patch(patch_lines, original, remove_binary=False):
                 # we can't fix the hunk header before we've captured a hunk
                 if first_hunk:
                     first_hunk = False
-
+                    current_hunk_header = match_groups
                     continue
 
                 try:
@@ -412,19 +606,22 @@ def fix_patch(patch_lines, original, remove_binary=False):
                         fixed_header,
                         offset,
                         last_hunk
-                    ) = capture_hunk(current_hunk, original_lines, offset, last_hunk, hunk_context)
-                except MissingHunkError:
-
-
+                    ) = capture_hunk(current_hunk, original_lines, offset, last_hunk, current_hunk_header, fuzzy=fuzzy)
+                except (MissingHunkError, OutOfOrderHunk) as e:
+                    e.add_file(current_file)
+                    raise e
                 fixed_lines.append(fixed_header)
                 fixed_lines.extend(current_hunk)
                 current_hunk = []
-
+                current_hunk_header = match_groups
             case "END_LINE":
-                #
+                # if user requested, add a newline at end of file when this marker is present
+                if add_newline:
+                    fixed_lines.append("\n")
+                else:
+                    current_hunk.append(normalize_line(line))
             case _:
-                # TODO: fuzzy string matching
+                # TODO: fix fuzzy string matching to be less granular
                 # this is a normal line, add to current hunk
                 current_hunk.append(normalize_line(line))
 
@@ -434,15 +631,18 @@ def fix_patch(patch_lines, original, remove_binary=False):
             fixed_header,
             offset,
             last_hunk
-        ) = capture_hunk(current_hunk, original_lines, offset, last_hunk, hunk_context)
-    except MissingHunkError:
-
-
+        ) = capture_hunk(current_hunk, original_lines, offset, last_hunk, current_hunk_header, fuzzy=fuzzy)
+    except (MissingHunkError, OutOfOrderHunk) as e:
+        e.add_file(current_file)
+        raise e
     fixed_lines.append(fixed_header)
     fixed_lines.extend(current_hunk)
 
-    # if original file didn't end with a newline, strip out the newline here
-
+    # if original file didn't end with a newline, strip out the newline here,
+    # unless user explicitly requested to add final newline
+    if (not add_newline and
+            ((original_lines and not original_lines[-1].endswith("\n")) or
+             (fixed_lines and len(original_lines) == 0))):
         fixed_lines[-1] = fixed_lines[-1].rstrip("\n")
 
     return fixed_lines
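Taken together, the error-handling changes mean callers can catch one of two exception types and learn which file and hunk failed. A usage sketch; the import path is assumed from the module layout shown in this diff:

```python
from patch_fixer.patch_fixer import MissingHunkError, OutOfOrderHunk, fix_patch

with open("broken.patch", encoding="utf-8") as f:
    patch_lines = f.readlines()

try:
    fixed_lines = fix_patch(patch_lines, "original/", fuzzy=True)
except (MissingHunkError, OutOfOrderHunk) as err:
    # both carry the offending hunk; fix_patch attaches the file via add_file()
    print(f"unfixable hunk in {err.file}:\n{err}")
```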
{patch_fixer-0.3.3.dist-info → patch_fixer-0.4.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: patch-fixer
-Version: 0.3.3
+Version: 0.4.0
 Summary: Fixes erroneous git apply patches to the best of its ability.
 Maintainer-email: Alex Mueller <amueller474@gmail.com>
 License-Expression: Apache-2.0
@@ -55,6 +55,11 @@ where:
 - `broken.patch` is the malformed patch generated by the LLM
 - `fixed.patch` is the output file containing the (hopefully) fixed patch
 
+Options:
+- `--fuzzy`: enable fuzzy string matching for better context matching (experimental)
+- `--add-newline`: add final newlines when processing "No newline at end of file" markers
+
+
 #### Splitting patches by file:
 ```bash
 # Split with files specified on command line
@@ -81,9 +86,16 @@ original = "/path/to/original/state" # file or directory being patched
 with open(patch_file, encoding="utf-8") as f:
     patch_lines = f.readlines()
 
+# basic usage
 fixed_lines = fix_patch(patch_lines, original)
-output_file = "/path/to/fixed.patch"
 
+# with fuzzy matching enabled
+fixed_lines = fix_patch(patch_lines, original, fuzzy=True)
+
+# with final newline addition
+fixed_lines = fix_patch(patch_lines, original, add_newline=True)
+
+output_file = "/path/to/fixed.patch"
 with open(output_file, 'w', encoding='utf-8') as f:
     f.writelines(fixed_lines)
 ```
@@ -107,6 +119,13 @@ with open("excluded.patch", 'w', encoding='utf-8') as f:
     f.writelines(excluded)
 ```
 
+## Known Limitations
+
+- When fixing patches with missing `index` lines, the tool requires the files to be in a git repository to regenerate the index. This is only needed for file deletions and renames.
+- `patch-fixer` assumes the patch follows git's unified diff format.
+- Current implementation is not very robust to corrupted hunk content
+- Much more comprehensive fuzzy string matching is planned
+
 ## Local Testing
 ```bash
 git clone https://github.com/ajcm474/patch-fixer.git
patch_fixer-0.4.0.dist-info/RECORD
ADDED
@@ -0,0 +1,10 @@
+patch_fixer/__init__.py,sha256=n5DDMr4jbO3epK3ybBvjDyRddTWlWamN6ao5BC7xHFo,65
+patch_fixer/cli.py,sha256=4zy02FsVrUrcQzsBwQ58PVfJXoG4OsOYKpk2JXGw1cY,3841
+patch_fixer/patch_fixer.py,sha256=YaArYeni8rFfFqILlytW7Bo9xz14SlsWGVjzHR_QqdI,28451
+patch_fixer/split.py,sha256=l0rHM6-ZBuB9Iv6Ng6rxqZH5eKfvk2t87j__nDu67kM,3869
+patch_fixer-0.4.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+patch_fixer-0.4.0.dist-info/METADATA,sha256=IAp5OWD110pKDSyzkDaj8rbLuX307Oo_CjJ93Ti-4-s,4907
+patch_fixer-0.4.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+patch_fixer-0.4.0.dist-info/entry_points.txt,sha256=ftc6dP6B1zJouSPeCCJLZtx-EEGVSrNEwy4YhtnEoxA,53
+patch_fixer-0.4.0.dist-info/top_level.txt,sha256=yyp3KjFgExJsrFsS9ZBCnkhb05xg8hPYhB7ncdpTOv0,12
+patch_fixer-0.4.0.dist-info/RECORD,,
patch_fixer-0.3.3.dist-info/RECORD
REMOVED
@@ -1,10 +0,0 @@
-patch_fixer/__init__.py,sha256=n5DDMr4jbO3epK3ybBvjDyRddTWlWamN6ao5BC7xHFo,65
-patch_fixer/cli.py,sha256=hgneS8DSCWBxv1l0u37n60FPGskAsDgodX9YxKgF-H0,3417
-patch_fixer/patch_fixer.py,sha256=GAavb15H5cEoNFgGlO5hIY7EOF88VCsjHcLrfyGW4_0,20587
-patch_fixer/split.py,sha256=l0rHM6-ZBuB9Iv6Ng6rxqZH5eKfvk2t87j__nDu67kM,3869
-patch_fixer-0.3.3.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-patch_fixer-0.3.3.dist-info/METADATA,sha256=URfv2ws8naMvx3t8xsqN5JyGBlC38EydFYPwgivTnvQ,4117
-patch_fixer-0.3.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-patch_fixer-0.3.3.dist-info/entry_points.txt,sha256=ftc6dP6B1zJouSPeCCJLZtx-EEGVSrNEwy4YhtnEoxA,53
-patch_fixer-0.3.3.dist-info/top_level.txt,sha256=yyp3KjFgExJsrFsS9ZBCnkhb05xg8hPYhB7ncdpTOv0,12
-patch_fixer-0.3.3.dist-info/RECORD,,
{patch_fixer-0.3.3.dist-info → patch_fixer-0.4.0.dist-info}/WHEEL
File without changes
{patch_fixer-0.3.3.dist-info → patch_fixer-0.4.0.dist-info}/entry_points.txt
File without changes
{patch_fixer-0.3.3.dist-info → patch_fixer-0.4.0.dist-info}/licenses/LICENSE
File without changes
{patch_fixer-0.3.3.dist-info → patch_fixer-0.4.0.dist-info}/top_level.txt
File without changes