gptdiff 0.1.24.tar.gz → 0.1.27.tar.gz
- {gptdiff-0.1.24 → gptdiff-0.1.27}/PKG-INFO +9 -7
- {gptdiff-0.1.24 → gptdiff-0.1.27}/README.md +9 -7
- {gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff/applydiff.py +1 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff/gptdiff.py +81 -36
- {gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff/gptpatch.py +13 -4
- {gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff.egg-info/PKG-INFO +9 -7
- {gptdiff-0.1.24 → gptdiff-0.1.27}/setup.py +1 -1
- {gptdiff-0.1.24 → gptdiff-0.1.27}/tests/test_applydiff.py +28 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/tests/test_parse_diff_per_file.py +14 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/tests/test_smartapply.py +20 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/LICENSE.txt +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff/__init__.py +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff.egg-info/SOURCES.txt +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff.egg-info/dependency_links.txt +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff.egg-info/entry_points.txt +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff.egg-info/requires.txt +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff.egg-info/top_level.txt +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/setup.cfg +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/tests/test_applydiff_edgecases.py +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/tests/test_diff_parse.py +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/tests/test_failing_case.py +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/tests/test_strip_bad_ouput.py +0 -0
- {gptdiff-0.1.24 → gptdiff-0.1.27}/tests/test_swallow_reasoning.py +0 -0
{gptdiff-0.1.24 → gptdiff-0.1.27}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: gptdiff
-Version: 0.1.24
+Version: 0.1.27
 Summary: A tool to generate and apply git diffs using LLMs
 Author: 255labs
 Classifier: License :: OSI Approved :: MIT License
@@ -82,13 +82,15 @@ done

 *Requires reasoning model*

-
+## Why Choose GPTDiff?

-- **
-- **
-- **Auto-
-- **
-- **
+- **Describe changes in plain English**
+- **AI gets your whole project**
+- **Auto-fixes conflicts**
+- **Keeps code functional**
+- **Fast setup, no fuss**
+- **You approve every change**
+- **Costs are upfront**

 ## Core Capabilities

{gptdiff-0.1.24 → gptdiff-0.1.27}/README.md

@@ -55,13 +55,15 @@ done

 *Requires reasoning model*

-
-
-- **
-- **
-- **Auto-
-- **
-- **
+## Why Choose GPTDiff?
+
+- **Describe changes in plain English**
+- **AI gets your whole project**
+- **Auto-fixes conflicts**
+- **Keeps code functional**
+- **Fast setup, no fuss**
+- **You approve every change**
+- **Costs are upfront**

 ## Core Capabilities

{gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff/applydiff.py

@@ -189,6 +189,7 @@ def parse_diff_per_file(diff_text):

     Note:
         Uses 'b/' prefix detection from git diffs to determine target paths
+        This doesn't work all the time and needs to be revised with stronger models
     """
     header_re = re.compile(r'^(?:diff --git\s+)?(a/[^ ]+)\s+(b/[^ ]+)\s*$', re.MULTILINE)
     lines = diff_text.splitlines()
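The docstring note above refers to the `header_re` pattern visible in the surrounding context, which accepts both full `diff --git a/... b/...` headers and bare `a/... b/...` pairs. A minimal sketch of that detection follows; the sample header is illustrative, not taken from the package's tests:

```python
import re

# Same pattern as header_re in parse_diff_per_file above.
header_re = re.compile(r'^(?:diff --git\s+)?(a/[^ ]+)\s+(b/[^ ]+)\s*$', re.MULTILINE)

sample_header = "diff --git a/gptdiff/applydiff.py b/gptdiff/applydiff.py"
match = header_re.search(sample_header)
if match:
    # Group 2 carries the 'b/'-prefixed target; stripping the prefix
    # yields the path the hunk should be written to.
    target_path = match.group(2)[len("b/"):]
    print(target_path)  # gptdiff/applydiff.py
```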
{gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff/gptdiff.py

@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
 from pathlib import Path
+from urllib.parse import urlparse
 import subprocess
 import hashlib
 import re
@@ -14,6 +15,7 @@ import pkgutil
 import contextvars
 from pkgutil import get_data
 import threading
+from threading import Lock

 import openai
 from openai import OpenAI
@@ -203,13 +205,26 @@ def load_prepend_file(file):
     with open(file, 'r') as f:
         return f.read()

-
+def domain_for_url(base_url):
+    parsed = urlparse(base_url)
+    if parsed.netloc:
+        if parsed.username:
+            domain = parsed.hostname
+            if parsed.port:
+                domain += f":{parsed.port}"
+        else:
+            domain = parsed.netloc
+    else:
+        domain = base_url
+    return domain
+
 def call_llm_for_diff(system_prompt, user_prompt, files_content, model, temperature=0.7, max_tokens=30000, api_key=None, base_url=None):
     enc = tiktoken.get_encoding("o200k_base")

     # Use colors in print statements
     red = "\033[91m"
     green = "\033[92m"
+    blue = "\033[94m"
     reset = "\033[0m"
     start_time = time.time()

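For reference, the new `domain_for_url` helper simply reduces a base URL to its host (and port), dropping any embedded credentials, using nothing beyond `urllib.parse`. A standalone sketch of the same logic with made-up URLs:

```python
from urllib.parse import urlparse

def domain_for_url(base_url):
    # Mirrors the helper added above: prefer the netloc, strip
    # user:password@ credentials, keep the host and optional port.
    parsed = urlparse(base_url)
    if parsed.netloc:
        if parsed.username:
            domain = parsed.hostname
            if parsed.port:
                domain += f":{parsed.port}"
        else:
            domain = parsed.netloc
    else:
        domain = base_url
    return domain

print(domain_for_url("https://nano-gpt.com/api/v1/"))             # nano-gpt.com
print(domain_for_url("https://user:secret@example.com:8443/v1"))  # example.com:8443
```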
@@ -222,10 +237,13 @@ def call_llm_for_diff(system_prompt, user_prompt, files_content, model, temperat
     if 'gemini' in model:
         user_prompt = system_prompt + "\n" + user_prompt

+    input_content = system_prompt + "\n" + user_prompt + "\n" + files_content
+    token_count = len(enc.encode(input_content))
     messages = [
-        {"role": "system", "content":
+        {"role": "system", "content": system_prompt},
         {"role": "user", "content": user_prompt + "\n" + files_content},
     ]
+
     if VERBOSE:
         print(f"{green}Using {model}{reset}")
         print(f"{green}SYSTEM PROMPT{reset}")
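The `token_count` introduced here is a plain tiktoken count over everything sent to the model, using the `o200k_base` encoding the function already loads. A small sketch of that accounting step; the prompt strings are placeholders:

```python
import tiktoken

enc = tiktoken.get_encoding("o200k_base")

system_prompt = "You convert change requests into git diffs."  # placeholder
user_prompt = "Add a --verbose flag to the CLI."               # placeholder
files_content = "File: gptdiff/gptpatch.py\n..."               # placeholder

# Same accounting as the added lines: concatenate the full input and
# encode it once to report how many tokens will be sent.
input_content = system_prompt + "\n" + user_prompt + "\n" + files_content
token_count = len(enc.encode(input_content))
print(f"{token_count} input tokens")
```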
@@ -233,7 +251,7 @@ def call_llm_for_diff(system_prompt, user_prompt, files_content, model, temperat
         print(f"{green}USER PROMPT{reset}")
         print(user_prompt, "+", len(enc.encode(files_content)), "tokens of file content")
     else:
-        print("Generating diff...")
+        print(f"Generating diff using model '{green}{model}{reset}' from '{blue}{domain_for_url(base_url)}{reset}' with {token_count} input tokens...")

     if not api_key:
         api_key = os.getenv('GPTDIFF_LLM_API_KEY')
@@ -248,9 +266,9 @@ def call_llm_for_diff(system_prompt, user_prompt, files_content, model, temperat
         temperature=temperature)

     if VERBOSE:
-        print("
+        print("Debug: Raw LLM Response\n---")
         print(response.choices[0].message.content.strip())
-        print("
+        print("---")
     else:
         print("Diff generated.")

@@ -265,9 +283,6 @@ def call_llm_for_diff(system_prompt, user_prompt, files_content, model, temperat
     print("-" * 40)

     # Now, these rates are updated to per million tokens
-    cost_per_million_prompt_tokens = 30
-    cost_per_million_completion_tokens = 60
-    cost = (prompt_tokens / 1_000_000 * cost_per_million_prompt_tokens) + (completion_tokens / 1_000_000 * cost_per_million_completion_tokens)

     full_response = response.choices[0].message.content.strip()
     full_response, reasoning = swallow_reasoning(full_response)
@@ -279,7 +294,7 @@ def call_llm_for_diff(system_prompt, user_prompt, files_content, model, temperat
     toolbox.use(event)
     diff_response = diff_context.get()

-    return full_response, diff_response, prompt_tokens, completion_tokens, total_tokens
+    return full_response, diff_response, prompt_tokens, completion_tokens, total_tokens

 # New API functions
 def build_environment(files_dict):
@@ -538,14 +553,23 @@ def smart_apply_patch(project_dir, diff_text, user_prompt, args):
     Attempt to apply a diff via smartapply: process each file concurrently using the LLM.
     """
     from pathlib import Path
+    start_time = time.time()
     parsed_diffs = parse_diff_per_file(diff_text)
     print("Found", len(parsed_diffs), "files in diff, processing smart apply concurrently:")
+    green = "\033[92m"
+    red = "\033[91m"
+    blue = "\033[94m"
+    reset = "\033[0m"
+
     if len(parsed_diffs) == 0:
         print(colorize_warning_warning("There were no entries in this diff. The LLM may have returned something invalid."))
         if args.beep:
             print("\a")
         return
     threads = []
+    success_files = []
+    failed_files = []
+    success_lock = Lock()

     def process_file(file_path, file_diff):
         full_path = Path(project_dir) / file_path
@@ -559,11 +583,14 @@ def smart_apply_patch(project_dir, diff_text, user_prompt, args):
                 print(colorize_warning_warning(f"File {file_path} not found - skipping deletion"))
                 return

-
-
-
-
-
+        original_content = ""
+        if full_path.exists():
+            try:
+                original_content = full_path.read_text()
+            except (UnicodeDecodeError, IOError) as e:
+                print(f"Cannot read {file_path} due to {str(e)}, treating as new file")
+        else:
+            print(f"File {file_path} does not exist, treating as new file")

         # Use SMARTAPPLY-specific environment variables if set, otherwise fallback.
         smart_apply_model = os.getenv("GPTDIFF_SMARTAPPLY_MODEL")
@@ -586,9 +613,7 @@ def smart_apply_patch(project_dir, diff_text, user_prompt, args):
         else:
             base_url = os.getenv("GPTDIFF_LLM_BASE_URL", "https://nano-gpt.com/api/v1/")

-        print("
-        print("Running smartapply with", model, "on", file_path)
-        print("-" * 40)
+        print(f"Running smartapply in parallel using model '{green}{model}{reset}' from '{blue}{domain_for_url(base_url)}{reset}'...")
         try:
             updated_content = call_llm_for_apply_with_think_tool_available(
                 file_path, original_content, file_diff, model,
@@ -597,12 +622,18 @@ def smart_apply_patch(project_dir, diff_text, user_prompt, args):
                 max_tokens=args.max_tokens)
             if updated_content.strip() == "":
                 print("Cowardly refusing to write empty file to", file_path, "merge failed")
+                with success_lock:
+                    failed_files.append(file_path)
                 return
             full_path.parent.mkdir(parents=True, exist_ok=True)
             full_path.write_text(updated_content)
             print(f"\033[1;32mSuccessful 'smartapply' update {file_path}.\033[0m")
+            with success_lock:
+                success_files.append(file_path)
         except Exception as e:
             print(f"\033[1;31mFailed to process {file_path}: {str(e)}\033[0m")
+            with success_lock:
+                failed_files.append(file_path)

     for file_path, file_diff in parsed_diffs:
         thread = threading.Thread(target=process_file, args=(file_path, file_diff))
@@ -610,7 +641,17 @@ def smart_apply_patch(project_dir, diff_text, user_prompt, args):
         threads.append(thread)
     for thread in threads:
         thread.join()
-
+    elapsed = time.time() - start_time
+    minutes, seconds = divmod(int(elapsed), 60)
+    time_str = f"{minutes}m {seconds}s" if minutes else f"{seconds}s"
+    print(f"Smartapply successfully applied changes in {time_str}. Check the updated files to confirm.")
+    if failed_files:
+        print(f"\033[1;31mSmart apply completed in {time_str} with failures for {len(failed_files)} files:\033[0m")
+        for file in failed_files:
+            print(f"  - {file}")
+        print("Please check the errors above for details.")
+    else:
+        print(f"\033[1;32mSmart apply completed successfully in {time_str} for all {len(success_files)} files.\033[0m")
     if args.beep:
         print("\a")

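The new bookkeeping in `smart_apply_patch` is a conventional threads-plus-Lock pattern: each worker records its outcome in shared lists under a lock, and the main thread joins all workers and prints a timed summary. A stripped-down sketch of that pattern; `fake_apply` stands in for the per-file LLM call:

```python
import threading
import time
from threading import Lock

success_files, failed_files = [], []
results_lock = Lock()

def fake_apply(path):
    # Stand-in for the real per-file smartapply work.
    ok = not path.endswith(".bin")
    with results_lock:  # guard the shared lists across worker threads
        (success_files if ok else failed_files).append(path)

start_time = time.time()
threads = [threading.Thread(target=fake_apply, args=(p,))
           for p in ["a.py", "b.py", "data.bin"]]
for t in threads:
    t.start()
for t in threads:
    t.join()

elapsed = time.time() - start_time
minutes, seconds = divmod(int(elapsed), 60)
time_str = f"{minutes}m {seconds}s" if minutes else f"{seconds}s"
print(f"Done in {time_str}: {len(success_files)} succeeded, {len(failed_files)} failed")
```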
@@ -710,9 +751,8 @@ def main():
         with open('prompt.txt', 'w') as f:
             f.write(full_prompt)
         print(f"Total tokens: {token_count:5d}")
-        print(f"\033[1;
+        print(f"\033[1;32mWrote full prompt to prompt.txt.\033[0m")
         print('Instead, wrote full prompt to prompt.txt. Use `xclip -selection clipboard < prompt.txt` then paste into chatgpt')
-        print(f"Total cost: ${0.0:.4f}")
         exit(0)
     else:
         # Validate API key presence before any API operations
@@ -729,7 +769,7 @@ def main():
             print("Request canceled")
             sys.exit(0)
         try:
-            full_text, diff_text, prompt_tokens, completion_tokens, total_tokens
+            full_text, diff_text, prompt_tokens, completion_tokens, total_tokens = call_llm_for_diff(system_prompt, user_prompt, files_content, args.model,
                 temperature=args.temperature,
                 api_key=os.getenv('GPTDIFF_LLM_API_KEY'),
                 base_url=os.getenv('GPTDIFF_LLM_BASE_URL', "https://nano-gpt.com/api/v1/"),
@@ -741,35 +781,40 @@ def main():
             prompt_tokens = 0
             completion_tokens = 0
             total_tokens = 0
-            cost = 0
             print(f"Error in LLM response {e}")

         if(diff_text.strip() == ""):
-            print(f"\033[1;
-            print("
+            print(f"\033[1;33mWarning: No valid diff data was generated. This could be due to an unclear prompt or an invalid LLM response.\033[0m")
+            print("Suggested action: Refine your prompt or check the full response below for clues.")
+            print("Full LLM response:\n---\n" + full_text + "\n---")
             if args.beep:
-                print("\a")
+                print("\a")
             return

         elif args.apply:
             print("\nAttempting apply with the following diff:")
-            print("\n<diff>")
             print(color_code_diff(diff_text))
-            print("\
-
-            if
-                print(f"\033[1;32mPatch applied successfully with
+            print("\033[94m**Attempting to apply patch using basic method...**\033[0m")
+            apply_result = apply_diff(project_dir, diff_text)
+            if apply_result:
+                print(f"\033[1;32mPatch applied successfully with basic apply.\033[0m")
             else:
-                print("
+                print("\033[94m**Attempting smart apply with LLM...**\033[0m")
                 smart_apply_patch(project_dir, diff_text, user_prompt, args)

         if args.beep:
-            print("\a")
+            print("\a")

-
-
-
-
+        green = "\033[92m"
+        reset = "\033[0m"
+        if VERBOSE:
+            print("API Usage Details:")
+            print(f"- Prompt tokens: {prompt_tokens}")
+            print(f"- Completion tokens: {completion_tokens}")
+            print(f"- Total tokens: {total_tokens}")
+            print(f"- Model used: {green}{args.model}{reset}")
+        else:
+            print(f"API Usage: {total_tokens} tokens, Model used: {green}{args.model}{reset}")

 def swallow_reasoning(full_response: str) -> (str, str):
     """
{gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff/gptpatch.py

@@ -13,7 +13,7 @@ This tool uses the same patch-application logic as gptdiff.
 import sys
 import argparse
 from pathlib import Path
-from gptdiff.gptdiff import apply_diff, smart_apply_patch
+from gptdiff.gptdiff import apply_diff, smart_apply_patch, color_code_diff

 def parse_arguments():
     parser = argparse.ArgumentParser(
@@ -49,11 +49,14 @@ def parse_arguments():
         default=30000,
         help="Maximum tokens to use for LLM responses"
     )
+    parser.add_argument('--verbose', action='store_true', help='Enable verbose output with detailed information')
     parser.add_argument('--dumb', action='store_true', default=False, help='Attempt dumb apply before trying smart apply')
     return parser.parse_args()

 def main():
     args = parse_arguments()
+    import gptdiff.gptdiff as gd
+    gd.VERBOSE = args.verbose
     if args.diff:
         diff_text = args.diff
     else:
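The new `--verbose` flag works by flipping the module-level `VERBOSE` flag in `gptdiff.gptdiff`, so the library's existing verbose code paths pick it up. A hedged sketch of that wiring, assuming the package is installed:

```python
import argparse
import gptdiff.gptdiff as gd  # assumes gptdiff is installed

parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true',
                    help='Enable verbose output with detailed information')
args = parser.parse_args(['--verbose'])

# Mirror of the change above: copy the CLI flag onto the module global.
gd.VERBOSE = args.verbose
print(gd.VERBOSE)  # True
```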
@@ -64,15 +67,21 @@ def main():
         diff_text = diff_path.read_text(encoding="utf8")

     project_dir = args.project_dir
+
+    if args.verbose:
+        print("\n\033[1;34mDiff to be applied:\033[0m")
+        print(color_code_diff(diff_text))
+        print("")
+
     if args.dumb:
         success = apply_diff(project_dir, diff_text)
         if success:
-            print("✅ Diff applied successfully
+            print("\033[1;32m✅ Diff applied successfully.\033[0m")
         else:
-            print("❌ Failed to apply diff using git apply. Attempting smart apply
+            print("\033[1;31m❌ Failed to apply diff using git apply. Attempting smart apply.\033[0m")
             smart_apply_patch(project_dir, diff_text, "", args)
     else:
         smart_apply_patch(project_dir, diff_text, "", args)

 if __name__ == "__main__":
-    main()
+    main()
{gptdiff-0.1.24 → gptdiff-0.1.27}/gptdiff.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: gptdiff
-Version: 0.1.24
+Version: 0.1.27
 Summary: A tool to generate and apply git diffs using LLMs
 Author: 255labs
 Classifier: License :: OSI Approved :: MIT License
@@ -82,13 +82,15 @@ done

 *Requires reasoning model*

-
+## Why Choose GPTDiff?

-- **
-- **
-- **Auto-
-- **
-- **
+- **Describe changes in plain English**
+- **AI gets your whole project**
+- **Auto-fixes conflicts**
+- **Keeps code functional**
+- **Fast setup, no fuss**
+- **You approve every change**
+- **Costs are upfront**

 ## Core Capabilities

{gptdiff-0.1.24 → gptdiff-0.1.27}/setup.py

@@ -2,7 +2,7 @@ from setuptools import setup, find_packages

 setup(
     name='gptdiff',
-    version='0.1.24',
+    version='0.1.27',
     description='A tool to generate and apply git diffs using LLMs',
     author='255labs',
     packages=find_packages(),  # Use find_packages() to automatically discover packages
{gptdiff-0.1.24 → gptdiff-0.1.27}/tests/test_applydiff.py

@@ -141,3 +141,31 @@ def test_new_file_creation_minimal_header_failure(tmp_project_dir_empty):
     content = new_file.read_text()
     assert content.strip() == expected_content.strip(), f"Expected file content:\n{expected_content}\nGot:\n{content}"

+@pytest.fixture
+def tmp_project_dir_with_gptdiff(tmp_path):
+    """
+    Create a temporary project directory with a gptdiff.py file containing four lines.
+    """
+    project_dir = tmp_path / "project"
+    project_dir.mkdir()
+    file = project_dir / "gptdiff.py"
+    file.write_text("#!/usr/bin/env python3\nfrom pathlib import Path\n# Line 3\n# Line 4\n")
+    return project_dir
+
+def test_apply_bad_diff_fails(tmp_project_dir_with_gptdiff):
+    """
+    Test that a diff is applied correctly to the file.
+    """
+    diff_text = """diff --git a/gptdiff/gptdiff.py b/gptdiff/gptdiff.py
+index 1234567..89abcde 100644
+--- a/gptdiff/gptdiff.py
++++ b/gptdiff/gptdiff.py
+@@ -1,4 +1,5 @@
+ #!/usr/bin/env python3
++from threading import Lock
+ from pathlib import Path"""
+
+    # Assume apply_diff is a function that applies the diff
+    from gptdiff.gptdiff import apply_diff
+    result = apply_diff(str(tmp_project_dir_with_gptdiff), diff_text)
+    assert result is False, "apply_diff should fail, needs smartapply"
{gptdiff-0.1.24 → gptdiff-0.1.27}/tests/test_parse_diff_per_file.py

@@ -126,6 +126,20 @@ diff --git a/file2.py b/file2.py
         self.assertIn("file1.py", paths)
         self.assertIn("file2.py", paths)

+def test_parse_diff_per_file_unconventional_header():
+    diff_text = """--- game.js
++++ game.js
+@@ -0,0 +1,3 @@
++let player = {
++ class: "Warrior",
++};
+"""
+    result = parse_diff_per_file(diff_text)
+    assert len(result) == 1, f"Expected one file patch, got {len(result)}"
+    file_path, patch = result[0]
+    assert file_path == "game.js", f"Expected file path 'game.js', got '{file_path}'"
+    assert "+++ game.js" in patch, "Expected patch to include '+++ game.js'"
+    assert "+let player" in patch, "Expected patch to include added lines"

 if __name__ == '__main__':
     unittest.main()
{gptdiff-0.1.24 → gptdiff-0.1.27}/tests/test_smartapply.py

@@ -244,3 +244,23 @@ def test_smartapply_complex_single_hunk(monkeypatch):
     assert "if not data:" in updated
     assert "temp = data * 2" not in updated
     assert "for x in data:" in updated
+
+def test_smartapply_new_file_with_incorrect_header(monkeypatch):
+    """Test that smartapply creates a new file from a diff with an incorrect '--- a/' header."""
+    diff_text = """
+diff --git a/game.js b/game.js
+--- a/game.js
+++++ b/game.js
+@@ -0,0 +1,3 @@
++let player = {
++ class: "Warrior",
++};
+"""
+    original_files = {}
+    expected_content = "let player = {\n class: \"Warrior\",\n};"
+    def mock_call_llm(file_path, original_content, file_diff, model, api_key, base_url, extra_prompt=None, max_tokens=None):
+        return expected_content
+    monkeypatch.setattr('gptdiff.gptdiff.call_llm_for_apply', mock_call_llm)
+    updated_files = smartapply(diff_text, original_files)
+    assert "game.js" in updated_files, "The new file 'game.js' should be created"
+    assert updated_files["game.js"] == expected_content, "The file content should match the diff"
The remaining 13 files listed above (+0 -0) are unchanged between 0.1.24 and 0.1.27.