gptdiff 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

gptdiff/gptdiff.py CHANGED
@@ -4,6 +4,7 @@ import openai
 from openai import OpenAI
 
 import tiktoken
+import time
 
 import os
 import json
@@ -20,7 +21,7 @@ import threading
 from pkgutil import get_data
 
 diff_context = contextvars.ContextVar('diffcontent', default="")
-def create_toolbox():
+def create_diff_toolbox():
     toolbox = Toolbox()
 
     def diff(content: str):
@@ -47,6 +48,25 @@ a/file.py b/file.py
     )
     return toolbox
 
+def create_think_toolbox():
+    toolbox = Toolbox()
+
+    def think(content: str):
+        print("Swallowed thoughts", content)
+
+    toolbox.add_tool(
+        name="think",
+        fn=think,
+        args={
+            "content": {
+                "type": "string",
+                "description": "Thoughts"
+            }
+        },
+        description=""
+    )
+    return toolbox
+
 
 def load_gitignore_patterns(gitignore_path):
     with open(gitignore_path, 'r') as f:
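
The new create_think_toolbox mirrors the existing diff toolbox but deliberately discards whatever lands in a think tag. A minimal sketch of how it gets wired up, reusing the FlatXMLParser and Toolbox helpers the module already imports (the sample response string is illustrative):

    parser = FlatXMLParser("think")
    toolbox = create_think_toolbox()
    response = "<think>plan the merge here</think>actual output"   # hypothetical LLM reply
    for event in parser.parse(response):
        toolbox.use(event)   # think events reach think(), which only prints "Swallowed thoughts"
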
@@ -146,10 +166,12 @@ def load_prepend_file(file):
 
 # Function to call GPT-4 API and calculate the cost
 def call_gpt4_api(system_prompt, user_prompt, files_content, model, temperature=0.7, max_tokens=2500, api_key=None, base_url=None):
+    enc = tiktoken.get_encoding("o200k_base")
+    start_time = time.time()
 
     parser = FlatXMLParser("diff")
     formatter = FlatXMLPromptFormatter(tag="diff")
-    toolbox = create_toolbox()
+    toolbox = create_diff_toolbox()
     tool_prompt = formatter.usage_prompt(toolbox)
     system_prompt += "\n"+tool_prompt
 
@@ -164,7 +186,7 @@ def call_gpt4_api(system_prompt, user_prompt, files_content, model, temperature=
     print("SYSTEM PROMPT")
     print(system_prompt)
     print("USER PROMPT")
-    print(user_prompt, "+", len(files_content), "characters of file content")
+    print(user_prompt, "+", len(enc.encode(files_content)), "tokens of file content")
 
     if api_key is None:
         api_key = os.getenv('GPTDIFF_LLM_API_KEY')
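
The prompt-size log now counts tokens with tiktoken instead of raw characters, using the o200k_base encoding instantiated at the top of call_gpt4_api. A minimal sketch of the same counting (the sample payload is illustrative):

    import tiktoken

    enc = tiktoken.get_encoding("o200k_base")
    files_content = "def hello():\n    return 'world'\n"   # stand-in for the real file bundle
    print(len(enc.encode(files_content)), "tokens of file content")
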
@@ -180,6 +202,12 @@ def call_gpt4_api(system_prompt, user_prompt, files_content, model, temperature=
     completion_tokens = response.usage.completion_tokens
     total_tokens = response.usage.total_tokens
 
+    elapsed = time.time() - start_time
+    minutes, seconds = divmod(int(elapsed), 60)
+    time_str = f"{minutes}m {seconds}s" if minutes else f"{seconds}s"
+    print(f"Diff creation time: {time_str}")
+    print("-" * 40)
+
     # Now, these rates are updated to per million tokens
     cost_per_million_prompt_tokens = 30
     cost_per_million_completion_tokens = 60
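
The elapsed-time report is plain divmod arithmetic on whole seconds; a worked example with an illustrative value:

    minutes, seconds = divmod(int(83.4), 60)   # divmod(83, 60) -> (1, 23)
    time_str = f"{minutes}m {seconds}s" if minutes else f"{seconds}s"
    print(time_str)                            # -> "1m 23s"; a 45-second run would print "45s"
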
@@ -187,7 +215,6 @@ def call_gpt4_api(system_prompt, user_prompt, files_content, model, temperature=
 
     full_response = response.choices[0].message.content.strip()
 
-
     events = parser.parse(full_response)
     for event in events:
         toolbox.use(event)
@@ -266,7 +293,10 @@ def smartapply(diff_text, files, model=None, api_key=None, base_url=None):
     if model is None:
         model = os.getenv('GPTDIFF_MODEL', 'deepseek-reasoner')
     parsed_diffs = parse_diff_per_file(diff_text)
-    print("SMARTAPPLY", diff_text)
+    print("-" * 40)
+    print("SMARTAPPLY")
+    print(diff_text)
+    print("-" * 40)
 
     def process_file(path, patch):
         original = files.get(path, '')
@@ -275,12 +305,12 @@ def smartapply(diff_text, files, model=None, api_key=None, base_url=None):
         if path in files:
             del files[path]
         else:
-            updated = call_llm_for_apply(path, original, patch, model, api_key=api_key, base_url=base_url)
+            updated = call_llm_for_apply_with_think_tool_available(path, original, patch, model, api_key=api_key, base_url=base_url)
             files[path] = updated.strip()
-
+
     for path, patch in parsed_diffs:
         process_file(path, patch)
-
+
     return files
 
 # Function to apply diff to project files
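
smartapply keeps its dict-in, dict-out contract; only the per-file merge call and the logging changed. A hedged usage sketch (the file contents and diff are made up; the call reads GPTDIFF_LLM_API_KEY, GPTDIFF_LLM_BASE_URL and GPTDIFF_MODEL from the environment when the optional arguments are omitted):

    from gptdiff.gptdiff import smartapply

    files = {"app.py": "def greet():\n    return 'hi'\n"}   # hypothetical one-file project
    diff_text = (
        "diff --git a/app.py b/app.py\n"
        "--- a/app.py\n"
        "+++ b/app.py\n"
        "@@ -1,2 +1,2 @@\n"
        " def greet():\n"
        "-    return 'hi'\n"
        "+    return 'hello'\n"
    )

    updated = smartapply(diff_text, files)   # mutates and returns the same dict
    print(updated["app.py"])
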
@@ -320,20 +350,20 @@ def absolute_to_relative(absolute_path):
 
 def parse_diff_per_file(diff_text):
     """Parse unified diff text into individual file patches.
-
+
     Splits a multi-file diff into per-file entries for processing. Handles:
     - File creations (+++ /dev/null)
     - File deletions (--- /dev/null)
     - Standard modifications
-
+
     Args:
         diff_text: Unified diff string as generated by `git diff`
-
+
     Returns:
         List of tuples (file_path, patch) where:
         - file_path: Relative path to modified file
        - patch: Full diff fragment for this file
-
+
     Note:
         Uses 'b/' prefix detection from git diffs to determine target paths
     """
@@ -373,6 +403,26 @@ def parse_diff_per_file(diff_text):
 
     return diffs
 
+def call_llm_for_apply_with_think_tool_available(file_path, original_content, file_diff, model, api_key=None, base_url=None):
+    parser = FlatXMLParser("think")
+    formatter = FlatXMLPromptFormatter(tag="think")
+    toolbox = create_think_toolbox()
+    full_response = call_llm_for_apply(file_path, original_content, file_diff, model, api_key=None, base_url=None)
+    notool_response = ""
+    events = parser.parse(full_response)
+    is_in_tool = False
+    appended_content = ""
+    for event in events:
+        if event.mode == 'append':
+            appended_content += event.content
+        if event.mode == 'close' and appended_content and event.tool is None:
+            notool_response += appended_content
+        if event.mode == 'close':
+            appended_content = ""
+        toolbox.use(event)
+
+    return notool_response
+
 def call_llm_for_apply(file_path, original_content, file_diff, model, api_key=None, base_url=None):
     """AI-powered diff application with conflict resolution.
 
@@ -409,7 +459,8 @@ def call_llm_for_apply(file_path, original_content, file_diff, model, api_key=No
 
 1. Carefully apply all changes from the diff
 2. Preserve surrounding context that isn't changed
-3. Only return the final file content, do not add any additional markup and do not add a code block"""
+3. Only return the final file content, do not add any additional markup and do not add a code block
+4. You must return the entire file. It overwrites the existing file."""
 
     user_prompt = f"""File: {file_path}
 File contents:
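
Both changes above target the merge step: the prompt now insists on the whole file coming back, and the call_llm_for_apply_with_think_tool_available wrapper added earlier strips any think-tagged reasoning before that file is written. Conceptually the stripping is equivalent to the regex below (an illustration only; the package actually replays FlatXMLParser events through the think toolbox):

    import re

    def strip_think_blocks(full_response: str) -> str:
        # Drop <think>...</think> sections, keep everything else verbatim.
        return re.sub(r"<think>.*?</think>", "", full_response, flags=re.DOTALL)

    print(strip_think_blocks("<think>work out the merge</think>merged file text"))   # -> "merged file text"
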
@@ -434,12 +485,19 @@ Diff to apply:
     if base_url is None:
         base_url = os.getenv('GPTDIFF_LLM_BASE_URL', "https://nano-gpt.com/api/v1/")
     client = OpenAI(api_key=api_key, base_url=base_url)
+    start_time = time.time()
     response = client.chat.completions.create(model=model,
         messages=messages,
         temperature=0.0,
         max_tokens=30000)
+    full_response = response.choices[0].message.content
 
-    return response.choices[0].message.content
+    elapsed = time.time() - start_time
+    minutes, seconds = divmod(int(elapsed), 60)
+    time_str = f"{minutes}m {seconds}s" if minutes else f"{seconds}s"
+    print(f"Smartapply time: {time_str}")
+    print("-" * 40)
+    return full_response
 
 def build_environment_from_filelist(file_list, cwd):
     """Build environment string from list of file paths"""
@@ -587,8 +645,17 @@ def main():
             print(f"Skipping binary file {file_path}")
             return
 
+        print("-" * 40)
+        print("SMARTAPPLY")
+        print(file_diff)
+        print("-" * 40)
         try:
-            updated_content = call_llm_for_apply(file_path, original_content, file_diff, args.model)
+            updated_content = call_llm_for_apply_with_think_tool_available(file_path, original_content, file_diff, args.model)
+
+            if updated_content.strip() == "":
+                print("Cowardly refusing to write empty file to", file_path, "merge failed")
+                return
+
             full_path.parent.mkdir(parents=True, exist_ok=True)
             full_path.write_text(updated_content)
             print(f"\033[1;32mSuccessful 'smartapply' update {file_path}.\033[0m")
gptdiff-0.1.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: gptdiff
-Version: 0.1.5
+Version: 0.1.6
 Summary: A tool to generate and apply git diffs using LLMs
 Author: 255labs
 Classifier: License :: OSI Approved :: MIT License
gptdiff-0.1.6.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
+gptdiff/__init__.py,sha256=yGjgwv7tNvH1ZLPsQyoo1CxpTOl1iCAwwDBp-_17ksQ,89
+gptdiff/gptdiff.py,sha256=VAbTBgxGYzxj2ZxRHCnZAneeFfZ8YAJ_qkB_9umnyjg,25689
+gptdiff-0.1.6.dist-info/LICENSE.txt,sha256=zCJk7yUYpMjFvlipi1dKtaljF8WdZ2NASndBYYbU8BY,1228
+gptdiff-0.1.6.dist-info/METADATA,sha256=hMEEWaG6TsauWVkDbtBD05uHtDjrBp3VaCmxXdh2QDk,7316
+gptdiff-0.1.6.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+gptdiff-0.1.6.dist-info/entry_points.txt,sha256=0yvXYEVAZFI-p32kQ4-h3qKVWS0a86jsM9FAwF89t9w,49
+gptdiff-0.1.6.dist-info/top_level.txt,sha256=XNkQkQGINaDndEwRxg8qToOrJ9coyfAb-EHrSUXzdCE,8
+gptdiff-0.1.6.dist-info/RECORD,,
gptdiff-0.1.5.dist-info/RECORD DELETED
@@ -1,8 +0,0 @@
-gptdiff/__init__.py,sha256=yGjgwv7tNvH1ZLPsQyoo1CxpTOl1iCAwwDBp-_17ksQ,89
-gptdiff/gptdiff.py,sha256=18BXTs2L8TgiwUCnqDm4GD7CdkFHa-9LoHd1ZQnz1io,23407
-gptdiff-0.1.5.dist-info/LICENSE.txt,sha256=zCJk7yUYpMjFvlipi1dKtaljF8WdZ2NASndBYYbU8BY,1228
-gptdiff-0.1.5.dist-info/METADATA,sha256=Lb5pD2_Hz3M3nt8dC60O_M0MQBRd-lq9jArcX5isY1Q,7316
-gptdiff-0.1.5.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-gptdiff-0.1.5.dist-info/entry_points.txt,sha256=0yvXYEVAZFI-p32kQ4-h3qKVWS0a86jsM9FAwF89t9w,49
-gptdiff-0.1.5.dist-info/top_level.txt,sha256=XNkQkQGINaDndEwRxg8qToOrJ9coyfAb-EHrSUXzdCE,8
-gptdiff-0.1.5.dist-info/RECORD,,