gptdiff 0.1.8-py3-none-any.whl → 0.1.10-py3-none-any.whl

gptdiff/gptdiff.py CHANGED
@@ -44,6 +44,9 @@ a/file.py b/file.py
  @@ -1,2 +1,2 @@
  -def old():
  +def new():
+
+ -
+ You must include the '--- file' and/or '+++ file' part of the diff. File modifications should include both.
  """
      )
      return toolbox
@@ -67,6 +70,25 @@ def create_think_toolbox():
      )
      return toolbox

+ def color_code_diff(diff_text: str) -> str:
+     """
+     Color code lines in a diff. Lines beginning with '-' in red, and
+     lines beginning with '+' in green.
+     """
+     red = "\033[31m"
+     green = "\033[32m"
+     reset = "\033[0m"
+
+     colorized_lines = []
+     for line in diff_text.split('\n'):
+         if line.startswith('-'):
+             colorized_lines.append(f"{red}{line}{reset}")
+         elif line.startswith('+'):
+             colorized_lines.append(f"{green}{line}{reset}")
+         else:
+             colorized_lines.append(line)
+
+     return '\n'.join(colorized_lines)

  def load_gitignore_patterns(gitignore_path):
      with open(gitignore_path, 'r') as f:
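The `color_code_diff` helper added above wraps removed lines in red and added lines in green using ANSI escape codes, and is later used when printing the diff during `--apply`. A minimal usage sketch (assuming the function is importable from `gptdiff.gptdiff`; the sample diff string is made up for illustration):

```python
# Sketch: colorize a small diff with the new helper. The sample diff text
# below is illustrative only, not taken from the package.
from gptdiff.gptdiff import color_code_diff

sample = "\n".join([
    "--- a/file.py",
    "+++ b/file.py",
    "@@ -1,2 +1,2 @@",
    "-def old():",
    "+def new():",
])

# Lines beginning with '-' are wrapped in red (\033[31m), lines beginning
# with '+' in green (\033[32m); all other lines pass through unchanged.
print(color_code_diff(sample))
```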
@@ -308,8 +330,16 @@ def smartapply(diff_text, files, model=None, api_key=None, base_url=None):
          updated = call_llm_for_apply_with_think_tool_available(path, original, patch, model, api_key=api_key, base_url=base_url)
          files[path] = updated.strip()

+     threads = []
+
      for path, patch in parsed_diffs:
-         process_file(path, patch)
+         thread = threading.Thread(target=process_file, args=(path, patch))
+         thread.start()
+         threads.append(thread)
+
+     # Wait for all threads to complete
+     for thread in threads:
+         thread.join()

      return files

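With this hunk, `smartapply` launches one thread per parsed file patch and joins them all before returning, so per-file LLM calls overlap instead of running sequentially. A sketch of calling `smartapply` itself (its signature is taken from the hunk header above; the diff, file contents, and model name are placeholders, and a real run needs `GPTDIFF_LLM_API_KEY` set):

```python
# Sketch: apply a multi-file diff with smartapply, which now processes each
# target file in its own thread. Diff, file contents, and model name are
# placeholders; a real call requires valid API credentials.
import os
from gptdiff.gptdiff import smartapply

files = {"app.py": "def old():\n    pass\n"}

diff_text = """--- a/app.py
+++ b/app.py
@@ -1,2 +1,2 @@
-def old():
+def new():
     pass
"""

updated = smartapply(
    diff_text,
    files,
    model="gpt-4o",  # placeholder model name
    api_key=os.getenv("GPTDIFF_LLM_API_KEY"),
    base_url=os.getenv("GPTDIFF_LLM_BASE_URL", "https://nano-gpt.com/api/v1/"),
)
print(updated["app.py"])
```

Since `process_file` writes each result into the shared `files` dict under its own path, the worker threads do not contend for the same entry.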
@@ -339,6 +369,7 @@ def parse_arguments():
      parser.add_argument('--temperature', type=float, default=0.7, help='Temperature parameter for model creativity (0.0 to 2.0)')
      parser.add_argument('--max_tokens', type=int, default=30000, help='Temperature parameter for model creativity (0.0 to 2.0)')
      parser.add_argument('--model', type=str, default=None, help='Model to use for the API call.')
+     parser.add_argument('--applymodel', type=str, default=None, help='Model to use for applying the diff. Defaults to the value of --model if not specified.')

      parser.add_argument('--nowarn', action='store_true', help='Disable large token warning')

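Together with the later hunk in `main()`, the new `--applymodel` flag picks the model used for the apply step and falls back to `--model` when omitted. A standalone sketch of that defaulting behaviour (the parser below is a stripped-down stand-in, not the package's full `parse_arguments()`):

```python
# Sketch: --applymodel defaults to the --model value when not given.
# Minimal stand-in parser for illustration, not the package's real one.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default=None, help='Model to use for the API call.')
parser.add_argument('--applymodel', type=str, default=None, help='Model to use for applying the diff.')

args = parser.parse_args(['--model', 'example-model'])  # hypothetical invocation
if args.applymodel is None:  # same fallback performed later in main()
    args.applymodel = args.model
print(args.applymodel)  # -> example-model
```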
@@ -404,11 +435,11 @@ def parse_diff_per_file(diff_text):

      return diffs

- def call_llm_for_apply_with_think_tool_available(file_path, original_content, file_diff, model, api_key=None, base_url=None):
+ def call_llm_for_apply_with_think_tool_available(file_path, original_content, file_diff, model, api_key=None, base_url=None, extra_prompt=None, max_tokens=30000):
      parser = FlatXMLParser("think")
      formatter = FlatXMLPromptFormatter(tag="think")
      toolbox = create_think_toolbox()
-     full_response = call_llm_for_apply(file_path, original_content, file_diff, model, api_key=None, base_url=None)
+     full_response = call_llm_for_apply(file_path, original_content, file_diff, model, api_key=api_key, base_url=base_url, extra_prompt=extra_prompt, max_tokens=max_tokens)
      notool_response = ""
      events = parser.parse(full_response)
      is_in_tool = False
@@ -424,7 +455,7 @@ def call_llm_for_apply_with_think_tool_available(file_path, original_content, fi

      return notool_response

- def call_llm_for_apply(file_path, original_content, file_diff, model, api_key=None, base_url=None):
+ def call_llm_for_apply(file_path, original_content, file_diff, model, api_key=None, base_url=None, extra_prompt=None, max_tokens=30000):
      """AI-powered diff application with conflict resolution.

      Internal workhorse for smartapply that handles individual file patches.
@@ -474,6 +505,8 @@ Diff to apply:
  {file_diff}
  </diff>"""

+     if extra_prompt:
+         user_prompt += f"\n\n{extra_prompt}"
      if model == "gemini-2.0-flash-thinking-exp-01-21":
          user_prompt = system_prompt+"\n"+user_prompt
      messages = [
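The new `extra_prompt` parameter simply appends extra instructions to the user prompt before the apply call; `main()` uses it to pass the original change request alongside each per-file patch. A small sketch of that string handling in isolation (the base prompt here is a shortened placeholder, not the package's full template):

```python
# Sketch: how an optional extra_prompt is folded into the apply prompt.
# The base prompt below is a shortened placeholder.
file_diff = "-def old():\n+def new():"
extra_prompt = "This changeset is from the following instructions:\nAdd a new() function"

user_prompt = f"Diff to apply:\n<diff>\n{file_diff}\n</diff>"
if extra_prompt:
    user_prompt += f"\n\n{extra_prompt}"
print(user_prompt)
```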
@@ -490,7 +523,7 @@ Diff to apply:
      response = client.chat.completions.create(model=model,
          messages=messages,
          temperature=0.0,
-         max_tokens=30000)
+         max_tokens=max_tokens)
      full_response = response.choices[0].message.content

      elapsed = time.time() - start_time
@@ -590,12 +623,21 @@ def main():
          if confirmation != 'y':
              print("Request canceled")
              sys.exit(0)
-     full_text, diff_text, prompt_tokens, completion_tokens, total_tokens, cost = call_llm_for_diff(system_prompt, user_prompt, files_content, args.model,
+     try:
+         full_text, diff_text, prompt_tokens, completion_tokens, total_tokens, cost = call_llm_for_diff(system_prompt, user_prompt, files_content, args.model,
              temperature=args.temperature,
              api_key=os.getenv('GPTDIFF_LLM_API_KEY'),
              base_url=os.getenv('GPTDIFF_LLM_BASE_URL', "https://nano-gpt.com/api/v1/"),
              max_tokens=args.max_tokens
          )
+     except Exception as e:
+         full_text = f"{e}"
+         diff_text = ""
+         prompt_tokens = 0
+         completion_tokens = 0
+         total_tokens = 0
+         cost = 0
+         print(f"Error in LLM response {e}")

      if(diff_text.strip() == ""):
          print(f"\033[1;33mThere was no data in this diff. The LLM may have returned something invalid.\033[0m")
@@ -608,7 +650,7 @@ def main():
      elif args.apply:
          print("\nAttempting apply with the following diff:")
          print("\n<diff>")
-         print(diff_text)
+         print(color_code_diff(diff_text))
          print("\n</diff>")
          print("Saved to patch.diff")
          if apply_diff(project_dir, diff_text):
@@ -651,8 +693,11 @@ def main():
          print("SMARTAPPLY")
          print(file_diff)
          print("-" * 40)
+         if args.applymodel is None:
+             args.applymodel = args.model
+
          try:
-             updated_content = call_llm_for_apply_with_think_tool_available(file_path, original_content, file_diff, args.model)
+             updated_content = call_llm_for_apply_with_think_tool_available(file_path, original_content, file_diff, args.applymodel, extra_prompt=f"This changeset is from the following instructions:\n{user_prompt}", max_tokens=args.max_tokens)

              if updated_content.strip() == "":
                  print("Cowardly refusing to write empty file to", file_path, "merge failed")
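This is where the new pieces meet: the apply step now uses `args.applymodel` (falling back to `--model`), forwards the original instructions through `extra_prompt`, and honours `--max_tokens`. A sketch of calling the per-file apply helper directly with the extended signature (file contents, diff, and model values are placeholders; a real call needs API credentials):

```python
# Sketch: invoke the per-file apply helper with the keyword arguments added
# in this release. File contents, diff, and model name are placeholders.
import os
from gptdiff.gptdiff import call_llm_for_apply_with_think_tool_available

updated_content = call_llm_for_apply_with_think_tool_available(
    "app.py",                              # file_path
    "def old():\n    pass\n",              # original_content
    "-def old():\n+def new():",            # file_diff
    "gpt-4o",                              # model (placeholder)
    api_key=os.getenv("GPTDIFF_LLM_API_KEY"),
    extra_prompt="This changeset is from the following instructions:\nRename old() to new()",
    max_tokens=30000,
)
print(updated_content)
```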
{gptdiff-0.1.8.dist-info → gptdiff-0.1.10.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: gptdiff
- Version: 0.1.8
+ Version: 0.1.10
  Summary: A tool to generate and apply git diffs using LLMs
  Author: 255labs
  Classifier: License :: OSI Approved :: MIT License
@@ -27,21 +27,54 @@ Dynamic: summary

  # GPTDiff

- 🚀 **AI-Powered Code Evolution** - Transform your codebase with natural language instructions
+ 🚀 **Create and apply diffs with AI** - Modify projects with natural language
+
+ More docs at [gptdiff.255labs.xyz](gptdiff.255labs.xyz)
+
+ ### Example Usage of `gptdiff`
+
+ #### Apply a Patch Directly
+ ```
+ bash
+ gptdiff "Add button animations on press" --apply
+ ```
+ ✅ Successfully applied patch
+
+ #### Generate a Patch File
+ ```
+ bash
+ gptdiff "Add API documentation" --call
+ ```
+ 🔧 Patch written to `diff.patch`
+
+ #### Generate a Prompt File Without Calling LLM
+ ```
+ bash
+ gptdiff "Improve error messages"
+ ```
+ 📄 LLM not called, written to `prompt.txt`
+
+ ---
+
+ ### Basic Usage

  ```bash
  cd myproject
  gptdiff 'add hover effects to the buttons'
  ```

- Generates a prompt.txt file that you can copy and paste into a large context gpt to have a conversation with suggested changes. You can also invoke the API and try to directly apply the patch using a smartapply if the git apply fails.
+ Generates a prompt.txt file that you can copy and paste into an LLM

- ## Value Proposition
+ ### Simple command line agent loops

  ```bash
- gptdiff "Update the readme with an api section" --apply
+ while
+ do
+ gptdiff "Add missing test cases" --apply
+ done
  ```
- <span style="color: #00ff00;">Patch applied successfully.</span>
+
+ *Requires reasoning model*

  ### Why GPTDiff?

gptdiff-0.1.10.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
+ gptdiff/__init__.py,sha256=yGjgwv7tNvH1ZLPsQyoo1CxpTOl1iCAwwDBp-_17ksQ,89
+ gptdiff/gptdiff.py,sha256=78_Y1ifKxCdC-e8TdKm3kKDklyV0K8S0fKyDdXhLNQs,27706
+ gptdiff-0.1.10.dist-info/LICENSE.txt,sha256=zCJk7yUYpMjFvlipi1dKtaljF8WdZ2NASndBYYbU8BY,1228
+ gptdiff-0.1.10.dist-info/METADATA,sha256=jw4gVLU2Gk7Iuqy-NdkeDlF9loiLfd7825lZxXIbQEY,7602
+ gptdiff-0.1.10.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ gptdiff-0.1.10.dist-info/entry_points.txt,sha256=0yvXYEVAZFI-p32kQ4-h3qKVWS0a86jsM9FAwF89t9w,49
+ gptdiff-0.1.10.dist-info/top_level.txt,sha256=XNkQkQGINaDndEwRxg8qToOrJ9coyfAb-EHrSUXzdCE,8
+ gptdiff-0.1.10.dist-info/RECORD,,
gptdiff-0.1.8.dist-info/RECORD DELETED
@@ -1,8 +0,0 @@
- gptdiff/__init__.py,sha256=yGjgwv7tNvH1ZLPsQyoo1CxpTOl1iCAwwDBp-_17ksQ,89
- gptdiff/gptdiff.py,sha256=kzLeNc5M3saCdqBOoE-OjkyHztGbQr39XPiVcFJ_pyY,25958
- gptdiff-0.1.8.dist-info/LICENSE.txt,sha256=zCJk7yUYpMjFvlipi1dKtaljF8WdZ2NASndBYYbU8BY,1228
- gptdiff-0.1.8.dist-info/METADATA,sha256=ErNMAtAqN7HDzarpVJi0IVA5i1pryUQwkjVjus9kjkM,7317
- gptdiff-0.1.8.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- gptdiff-0.1.8.dist-info/entry_points.txt,sha256=0yvXYEVAZFI-p32kQ4-h3qKVWS0a86jsM9FAwF89t9w,49
- gptdiff-0.1.8.dist-info/top_level.txt,sha256=XNkQkQGINaDndEwRxg8qToOrJ9coyfAb-EHrSUXzdCE,8
- gptdiff-0.1.8.dist-info/RECORD,,