pdd-cli 0.0.11__py3-none-any.whl → 0.0.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pdd-cli might be problematic.

pdd/cli.py CHANGED
@@ -46,7 +46,7 @@ console = Console()
 @click.option("--review-examples", is_flag=True,
               help="Review and optionally exclude few-shot examples before command execution.")
 @click.option('--local', is_flag=True, help='Run commands locally instead of in the cloud.')
-@click.version_option(version="0.0.11")
+@click.version_option(version="0.0.12")
 @click.pass_context
 def cli(
     ctx,
pdd/fix_error_loop.py CHANGED
@@ -21,7 +21,7 @@ def escape_brackets(text: str) -> str:
 def extract_pytest_summary(log_contents: str) -> (int, int, int):
     """
     Extract the number of fails, errors and warnings from pytest output.
-    Try to match a typical summary line first; if not found fall back to individual regex searches.
+    Try to match a typical summary line first; if not found, fall back to individual regex searches.
     Returns a tuple: (fails, errors, warnings)
     """
     fails, errors, warnings = sys.maxsize, sys.maxsize, sys.maxsize  # defaults if not found
@@ -31,17 +31,16 @@ def extract_pytest_summary(log_contents: str) -> (int, int, int):
     match = summary_pattern.search(log_contents)
     if match:
         fails = int(match.group(1))
-        # In some pytest outputs, failures and errors may be reported separately.
-        errors = int(match.group(1))  # assume same value if no distinct errors are provided
+        # Some pytest outputs lump failures and errors together, but let's keep them the same if not distinct:
+        errors = int(match.group(1))
         warnings = int(match.group(3))
     else:
         failed_match = re.search(r"(\d+)\s+failed", log_contents, re.IGNORECASE)
         errors_match = re.search(r"(\d+)\s+error", log_contents, re.IGNORECASE)
         warnings_match = re.search(r"(\d+)\s+warning", log_contents, re.IGNORECASE)
         fails = int(failed_match.group(1)) if failed_match else 0
-        errors = int(errors_match.group(1)) if errors_match else 0
+        errors = int(errors_match.group(1)) if errors_match else fails
         warnings = int(warnings_match.group(1)) if warnings_match else 0
-
     return fails, errors, warnings

 def fix_error_loop(unit_test_file: str,
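Note: the fallback branch above changed its default for errors. A minimal, self-contained sketch of the new behavior, assuming a pytest summary that reports failures and warnings but no separate error count (the sample log text is illustrative):

import re

log_contents = "==== 2 failed, 5 passed, 1 warning in 0.42s ===="
failed_match = re.search(r"(\d+)\s+failed", log_contents, re.IGNORECASE)
errors_match = re.search(r"(\d+)\s+error", log_contents, re.IGNORECASE)
warnings_match = re.search(r"(\d+)\s+warning", log_contents, re.IGNORECASE)

fails = int(failed_match.group(1)) if failed_match else 0
# 0.0.12 defaults a missing error count to the failure count instead of 0:
errors = int(errors_match.group(1)) if errors_match else fails
warnings = int(warnings_match.group(1)) if warnings_match else 0
print(fails, errors, warnings)  # -> 2 2 1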
@@ -55,7 +54,10 @@ def fix_error_loop(unit_test_file: str,
                    error_log_file: str = "error_log.txt",
                    verbose: bool = False):
     """
-    Attempt to fix errors in a unit test and corresponding code using repeated iterations.
+    Attempt to fix errors in a unit test and corresponding code using repeated iterations,
+    counting only the number of times we actually call the LLM fix function. The tests
+    are re-run in the same iteration after a fix to see if we've succeeded, so that
+    'attempts' matches the number of fix attempts (not the total test runs).

     Inputs:
     unit_test_file: Path to the file containing unit tests.
@@ -64,7 +66,7 @@ def fix_error_loop(unit_test_file: str,
     verification_program: Path to a Python program that verifies the code still works.
     strength: float [0,1] representing LLM fix strength.
     temperature: float [0,1] representing LLM temperature.
-    max_attempts: Maximum number of iterations for fixes.
+    max_attempts: Maximum number of fix attempts.
     budget: Maximum cost allowed for the fixing process.
     error_log_file: Path to file to log errors (default: "error_log.txt").
     verbose: Enable verbose logging (default: False).
@@ -73,7 +75,7 @@ def fix_error_loop(unit_test_file: str,
     success: Boolean indicating if the overall process succeeded.
     final_unit_test: String contents of the final unit test file.
     final_code: String contents of the final code file.
-    total_attempts: Number of fix attempts made.
+    total_attempts: Number of fix attempts actually made.
     total_cost: Total cost accumulated.
     model_name: Name of the LLM model used.
     """
@@ -97,7 +99,8 @@ def fix_error_loop(unit_test_file: str,
             rprint(f"[red]Error:[/red] Could not remove error log file: {e}")
             return False, "", "", 0, 0.0, ""

-    attempt = 0
+    # We use fix_attempts to track how many times we actually call the LLM:
+    fix_attempts = 0
     total_cost = 0.0
     model_name = ""
     best_iteration_info = {
@@ -109,53 +112,55 @@ def fix_error_loop(unit_test_file: str,
         "code_backup": None
     }

-    # Timestamp for backup naming.
+    # For differentiating backup filenames:
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-
-    while attempt < max_attempts and total_cost < budget:
-        attempt += 1
-        iteration_header = f"=== Attempt {attempt} ==="
+
+    # We do up to max_attempts fix attempts or until budget is exceeded
+    iteration = 0
+    while fix_attempts < max_attempts and total_cost < budget:
+        iteration += 1
+        iteration_header = f"=== Attempt iteration {iteration} ==="
         rprint(f"[bold blue]{iteration_header}[/bold blue]")
-        # Append header to error log.
         with open(error_log_file, "a") as elog:
             elog.write(f"\n{iteration_header}\n")
-
-        # Step 2a: Run pytest on the unit test file.
+
+        # 1) Run the unit tests:
         try:
-            # Run pytest via subprocess.
-            # Here we assume that the unit_test_file is discoverable or we pass it explicitly.
             pytest_cmd = [sys.executable, "-m", "pytest", "-vv", "--no-cov", unit_test_file]
             result = subprocess.run(pytest_cmd, capture_output=True, text=True)
            pytest_output = result.stdout + "\n" + result.stderr
         except Exception as e:
             rprint(f"[red]Error running pytest:[/red] {e}")
-            return False, "", "", attempt, total_cost, model_name
+            return False, "", "", fix_attempts, total_cost, model_name

-        # Append the pytest output to the error log file.
+        # Append to error log:
         with open(error_log_file, "a") as elog:
             elog.write(pytest_output + "\n")
-
-        # Escape square brackets for safe rprint.
-        output_escaped = escape_brackets(pytest_output)
-        rprint(f"[magenta]Pytest output:[/magenta]\n{output_escaped}")

-        # Step 2b: Extract numbers of fails, errors, warnings.
+        # Print to console (escaped):
+        rprint(f"[magenta]Pytest output:[/magenta]\n{escape_brackets(pytest_output)}")
+
         fails, errors, warnings = extract_pytest_summary(pytest_output)
         if verbose:
             rprint(f"[cyan]Iteration summary: {fails} failed, {errors} errors, {warnings} warnings[/cyan]")

-        # Check if tests passed and there are no warnings.
+        # If test is fully successful, we break out:
         if fails == 0 and errors == 0 and warnings == 0:
             rprint("[green]All tests passed with no warnings! Exiting loop.[/green]")
             break

-        # Step 2c: Create backup copies for unit_test_file and code_file.
+        # We only attempt to fix if test is failing or has warnings:
+        # Let's create backups:
         unit_test_dir, unit_test_name = os.path.split(unit_test_file)
         code_dir, code_name = os.path.split(code_file)
-        unit_test_backup = os.path.join(unit_test_dir,
-            f"{os.path.splitext(unit_test_name)[0]}_{attempt}_{errors}_{fails}_{warnings}_{timestamp}.py")
-        code_backup = os.path.join(code_dir,
-            f"{os.path.splitext(code_name)[0]}_{attempt}_{errors}_{fails}_{warnings}_{timestamp}.py")
+        unit_test_backup = os.path.join(
+            unit_test_dir,
+            f"{os.path.splitext(unit_test_name)[0]}_{iteration}_{errors}_{fails}_{warnings}_{timestamp}.py"
+        )
+        code_backup = os.path.join(
+            code_dir,
+            f"{os.path.splitext(code_name)[0]}_{iteration}_{errors}_{fails}_{warnings}_{timestamp}.py"
+        )
         try:
             shutil.copy(unit_test_file, unit_test_backup)
             shutil.copy(code_file, code_backup)
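Note: a small runnable sketch of the backup naming built by the block above, under the renamed iteration counter; the test path and the counts below are illustrative:

import os
from datetime import datetime

unit_test_file = "tests/test_calc.py"  # hypothetical path
iteration, errors, fails, warnings = 1, 0, 2, 1  # sample counts
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

unit_test_dir, unit_test_name = os.path.split(unit_test_file)
# The backup name encodes iteration, errors, fails, warnings, and a timestamp:
unit_test_backup = os.path.join(
    unit_test_dir,
    f"{os.path.splitext(unit_test_name)[0]}_{iteration}_{errors}_{fails}_{warnings}_{timestamp}.py"
)
print(unit_test_backup)  # e.g. tests/test_calc_1_0_2_1_20250101_120000.py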
@@ -164,24 +169,22 @@ def fix_error_loop(unit_test_file: str,
             rprint(f"[green]Created backup for code file:[/green] {code_backup}")
         except Exception as e:
             rprint(f"[red]Error creating backup files:[/red] {e}")
-            return False, "", "", attempt, total_cost, model_name
+            return False, "", "", fix_attempts, total_cost, model_name

-        # Update best_iteration tracker if this iteration has fewer errors, fails, warnings.
+        # Update best iteration if needed:
         if (errors < best_iteration_info["errors"] or
-            (errors == best_iteration_info["errors"] and fails < best_iteration_info["fails"]) or
-            (errors == best_iteration_info["errors"] and fails == best_iteration_info["fails"] and warnings < best_iteration_info["warnings"])):
+                (errors == best_iteration_info["errors"] and fails < best_iteration_info["fails"]) or
+                (errors == best_iteration_info["errors"] and fails == best_iteration_info["fails"] and warnings < best_iteration_info["warnings"])):
             best_iteration_info = {
-                "attempt": attempt,
+                "attempt": iteration,
                 "fails": fails,
                 "errors": errors,
                 "warnings": warnings,
                 "unit_test_backup": unit_test_backup,
                 "code_backup": code_backup
             }
-        if verbose:
-            rprint(f"[cyan]Updated best iteration to attempt {attempt} (errors: {errors}, fails: {fails}, warnings: {warnings}).[/cyan]")
-
-        # Step 2d: Read file contents.
+
+        # Read file contents:
         try:
             with open(unit_test_file, "r") as f:
                 unit_test_contents = f.read()
@@ -189,16 +192,15 @@ def fix_error_loop(unit_test_file: str,
                 code_contents = f.read()
         except Exception as e:
             rprint(f"[red]Error reading input files:[/red] {e}")
-            return False, "", "", attempt, total_cost, model_name
+            return False, "", "", fix_attempts, total_cost, model_name

-        # Call the internal fix_errors_from_unit_tests function.
+        # Call fix:
         try:
-            (updated_unit_test,
-             updated_code,
-             fixed_unit_test,
-             fixed_code,
-             cost,
-             model_name) = fix_errors_from_unit_tests(
+            # read error log file into pytest_output so it has history of all previous attempts:
+            with open(error_log_file, "r") as f:
+                pytest_output = f.read()
+
+            updated_unit_test, updated_code, fixed_unit_test, fixed_code, cost, model_name = fix_errors_from_unit_tests(
                 unit_test_contents,
                 code_contents,
                 prompt,
@@ -212,7 +214,7 @@ def fix_error_loop(unit_test_file: str,
             rprint(f"[red]Error during fix_errors_from_unit_tests call:[/red] {e}")
             break

-        # Add cost.
+        fix_attempts += 1  # We used one fix attempt
         total_cost += cost
         if verbose:
             rprint(f"[cyan]Iteration fix cost: ${cost:.6f}, Total cost: ${total_cost:.6f}[/cyan]")
@@ -220,65 +222,95 @@ def fix_error_loop(unit_test_file: str,
             rprint(f"[red]Exceeded the budget of ${budget:.6f}. Ending fixing loop.[/red]")
             break

-        # If neither unit test nor code was updated, likely no changes were needed.
-        if not updated_unit_test and not updated_code:
-            rprint("[yellow]No changes were suggested by the LLM. Exiting loop.[/yellow]")
-            break
+        # Even if no changes, the tests require we continue up to max_attempts
+        # so skip the old "break if no changes" logic.

-        # Step 2e: If updated_unit_test is True, write the updates back.
+        # If updated_unit_test is True, write to file:
         if updated_unit_test:
             try:
                 with open(unit_test_file, "w") as f:
                     f.write(fixed_unit_test)
                 if verbose:
-                    rprint(f"[green]Unit test file updated.[/green]")
+                    rprint("[green]Unit test file updated.[/green]")
             except Exception as e:
-                rprint(f"[red]Error writing updated unit test file:[/red] {e}")
+                rprint(f"[red]Error writing updated unit test:[/red] {e}")
                 break

-        # Increment attempt counter is already performed at loop start.
-        # Step 2f: If updated_code is True, update code file and verify.
+        # If updated_code is True, write it and run verification:
         if updated_code:
             try:
                 with open(code_file, "w") as f:
                     f.write(fixed_code)
                 if verbose:
-                    rprint(f"[green]Code file updated.[/green]")
+                    rprint("[green]Code file updated.[/green]")
             except Exception as e:
                 rprint(f"[red]Error writing updated code file:[/red] {e}")
                 break

-            # Run the verification program.
+            # Run the verification:
             try:
                 verify_cmd = [sys.executable, verification_program]
                 verify_result = subprocess.run(verify_cmd, capture_output=True, text=True)
-                verify_output = verify_result.stdout + "\n" + verify_result.stderr
+                # Safely handle None for stdout or stderr:
+                verify_stdout = verify_result.stdout or ""
+                verify_stderr = verify_result.stderr or ""
+                verify_output = verify_stdout + "\n" + verify_stderr
             except Exception as e:
                 rprint(f"[red]Error running verification program:[/red] {e}")
                 verify_output = f"Verification program error: {e}"

-            # Log verification output.
             with open(error_log_file, "a") as elog:
-                elog.write(f"\n[Verification attempt at iteration {attempt}]\n")
+                elog.write(f"\n[Verification attempt at iteration {iteration}]\n")
                 elog.write(verify_output + "\n")
+
             rprint(f"[blue]Verification program output:[/blue]\n{escape_brackets(verify_output)}")

-            # Check if verification failed. Assume non-zero return code indicates failure.
             if verify_result.returncode != 0:
-                rprint(f"[red]Verification failed. Restoring last working code file from backup.[/red]")
+                rprint("[red]Verification failed. Restoring last working code file from backup.[/red]")
                 try:
-                    # Restore code file from the backup of this iteration.
                     shutil.copy(code_backup, code_file)
                     with open(error_log_file, "a") as elog:
                         elog.write(f"Restored code file from backup: {code_backup}\n")
                 except Exception as e:
                     rprint(f"[red]Error restoring backup code file:[/red] {e}")
                     break
-                continue  # Continue next loop iteration after restore.
+                # We do NOT break or exit this for-loop; let next iteration attempt to fix again.

-        # End of while loop iteration.
-
-    # Step 4: After loop, run pytest one last time.
+        # IMPORTANT: Re-run the tests in the *same* iteration to see if we have fixed the problem:
+        # So that if the new code or new test is good, we can break out with exactly one fix_attempt.
+        try:
+            second_run_result = subprocess.run(pytest_cmd, capture_output=True, text=True)
+            second_run_output = second_run_result.stdout + "\n" + second_run_result.stderr
+        except Exception as e:
+            rprint(f"[red]Error running second pytest attempt in iteration {iteration}:[/red] {e}")
+            return False, "", "", fix_attempts, total_cost, model_name
+
+        with open(error_log_file, "a") as elog:
+            elog.write("\n=== Second Pytest Check (same iteration) ===\n")
+            elog.write(second_run_output + "\n")
+
+        rprint(f"[magenta]Second pytest check:[/magenta]\n{escape_brackets(second_run_output)}")
+
+        fails2, errors2, warnings2 = extract_pytest_summary(second_run_output)
+        if fails2 == 0 and errors2 == 0 and warnings2 == 0:
+            rprint("[green]All tests passed on the second run of this iteration! Exiting loop.[/green]")
+            break
+        else:
+            # Update best iteration if needed:
+            if (errors2 < best_iteration_info["errors"] or
+                    (errors2 == best_iteration_info["errors"] and fails2 < best_iteration_info["fails"]) or
+                    (errors2 == best_iteration_info["errors"] and fails2 == best_iteration_info["fails"] and warnings2 < best_iteration_info["warnings"])):
+                best_iteration_info = {
+                    "attempt": iteration,
+                    "fails": fails2,
+                    "errors": errors2,
+                    "warnings": warnings2,
+                    "unit_test_backup": unit_test_backup,
+                    "code_backup": code_backup
+                }
+            # If still not passing, we simply continue to the next iteration in the while loop.
+
+    # After we exit the while or exceed attempts/budget, run pytest once more to get final stats:
     try:
         final_pytest_cmd = [sys.executable, "-m", "pytest", "-vv", "--no-cov", unit_test_file]
         final_result = subprocess.run(final_pytest_cmd, capture_output=True, text=True)
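Note: the attempt accounting introduced above, reduced to a runnable skeleton. run_tests and call_llm_fix are stand-ins for the pytest subprocess and fix_errors_from_unit_tests; the point is that fix_attempts counts LLM calls, while each iteration re-runs the tests right after a fix:

max_attempts, budget = 3, 1.0
results = iter([(2, 2, 0), (0, 0, 0)])  # first run fails, re-run passes

def run_tests():
    # Stand-in for shelling out to pytest and parsing its summary.
    return next(results, (0, 0, 0))

def call_llm_fix():
    return 0.01  # stand-in for the LLM call; returns a pretend cost

fix_attempts, total_cost, iteration = 0, 0.0, 0
while fix_attempts < max_attempts and total_cost < budget:
    iteration += 1
    fails, errors, warnings = run_tests()
    if fails == errors == warnings == 0:
        break  # already green, no fix needed
    total_cost += call_llm_fix()
    fix_attempts += 1  # counts fixes, not test runs
    fails, errors, warnings = run_tests()  # second check, same iteration
    if fails == errors == warnings == 0:
        break  # fixed on this attempt
print(fix_attempts, iteration)  # -> 1 1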
@@ -287,27 +319,41 @@ def fix_error_loop(unit_test_file: str,
         rprint(f"[red]Error running final pytest:[/red] {e}")
         final_output = f"Error: {e}"

-    # Append final output to error log.
     with open(error_log_file, "a") as elog:
         elog.write("\n=== Final Pytest Run ===\n")
         elog.write(final_output + "\n")
+
     rprint(f"[blue]Final pytest output:[/blue]\n{escape_brackets(final_output)}")
-
-    # Step 5: If the last iteration is not the best, restore the best iteration backups.
-    best_attempt = best_iteration_info.get("attempt")
-    if best_attempt is not None:
-        # Optionally compare the last iteration numbers with best_iteration_info here.
-        if verbose:
-            rprint(f"[cyan]Restoring best iteration ({best_attempt}) from backups.[/cyan]")
-        try:
-            if best_iteration_info["unit_test_backup"]:
-                shutil.copy(best_iteration_info["unit_test_backup"], unit_test_file)
-            if best_iteration_info["code_backup"]:
-                shutil.copy(best_iteration_info["code_backup"], code_file)
-        except Exception as e:
-            rprint(f"[red]Error restoring best iteration backups:[/red] {e}")
-
-    # Read final file contents.
+
+    # Possibly restore best iteration if the final run is not the best:
+    # The prompt says: "If the last run isn't the best iteration, restore the best."
+    final_fails, final_errors, final_warnings = extract_pytest_summary(final_output)
+    if best_iteration_info["attempt"] is not None:
+        # Compare final run to best iteration:
+        is_better_final = False
+        # If final has strictly fewer errors, or tie then fewer fails, or tie then fewer warnings => keep final
+        if final_errors < best_iteration_info["errors"]:
+            is_better_final = True
+        elif final_errors == best_iteration_info["errors"] and final_fails < best_iteration_info["fails"]:
+            is_better_final = True
+        elif (final_errors == best_iteration_info["errors"] and
+              final_fails == best_iteration_info["fails"] and
+              final_warnings < best_iteration_info["warnings"]):
+            is_better_final = True
+
+        if not is_better_final:
+            # restore
+            if verbose:
+                rprint(f"[cyan]Restoring best iteration ({best_iteration_info['attempt']}) from backups.[/cyan]")
+            try:
+                if best_iteration_info["unit_test_backup"]:
+                    shutil.copy(best_iteration_info["unit_test_backup"], unit_test_file)
+                if best_iteration_info["code_backup"]:
+                    shutil.copy(best_iteration_info["code_backup"], code_file)
+            except Exception as e:
+                rprint(f"[red]Error restoring best iteration backups:[/red] {e}")
+
+    # Read final file contents
     try:
         with open(unit_test_file, "r") as f:
             final_unit_test = f.read()
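Note: the is_better_final cascade above is equivalent to a lexicographic tuple comparison; a sketch with sample counts (not how the shipped code spells it):

best_iteration_info = {"errors": 1, "fails": 2, "warnings": 0}  # sample values
final_errors, final_fails, final_warnings = 1, 1, 3
# Tuples compare element by element: errors first, then fails, then warnings.
is_better_final = (final_errors, final_fails, final_warnings) < (
    best_iteration_info["errors"],
    best_iteration_info["fails"],
    best_iteration_info["warnings"],
)
print(is_better_final)  # -> True: equal errors, fewer fails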
@@ -317,15 +363,15 @@ def fix_error_loop(unit_test_file: str,
         rprint(f"[red]Error reading final files:[/red] {e}")
         final_unit_test, final_code = "", ""

-    # Determine success based on final pytest result: pass if no failures, errors or warnings.
+    # Check final results for success (no fails, no errors, no warnings)
     final_fails, final_errors, final_warnings = extract_pytest_summary(final_output)
     success = (final_fails == 0 and final_errors == 0 and final_warnings == 0)
     if success:
         rprint("[green]Final tests passed with no warnings.[/green]")
     else:
         rprint("[red]Final tests still failing or producing warnings.[/red]")
-
-    return success, final_unit_test, final_code, attempt, total_cost, model_name
+
+    return success, final_unit_test, final_code, fix_attempts, total_cost, model_name

 # If this module is run directly for testing purposes:
 if __name__ == "__main__":
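Note: a hedged usage sketch of the updated fix_error_loop, using keyword arguments because only part of the signature is visible in this diff; all file paths and the prompt string are illustrative:

from pdd.fix_error_loop import fix_error_loop

success, final_unit_test, final_code, total_attempts, total_cost, model_name = fix_error_loop(
    unit_test_file="tests/test_calc.py",    # hypothetical
    code_file="pdd/calc.py",                # hypothetical
    prompt="Fix the failing unit tests.",   # hypothetical
    verification_program="verify_calc.py",  # hypothetical
    strength=0.5,
    temperature=0.0,
    max_attempts=3,    # in 0.0.12: max LLM fix attempts, not total test runs
    budget=1.0,
    error_log_file="error_log.txt",
    verbose=True,
)
print(success, total_attempts, f"${total_cost:.4f}", model_name)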
pdd_cli-0.0.12.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: pdd-cli
-Version: 0.0.11
+Version: 0.0.12
 Summary: PDD (Prompt-Driven Development) Command Line Interface
 Author: Greg Tanaka
 Author-email: glt@alumni.caltech.edu
@@ -40,7 +40,7 @@ Requires-Dist: semver==3.0.2
 Requires-Dist: setuptools==75.1.0
 Requires-Dist: python-Levenshtein

-.. image:: https://img.shields.io/badge/pdd--cli-v0.0.11-blue
+.. image:: https://img.shields.io/badge/pdd--cli-v0.0.12-blue
    :alt: PDD-CLI Version

 PDD (Prompt-Driven Development) Command Line Interface
@@ -101,7 +101,7 @@ After installation, verify:

 pdd --version

-You'll see the current PDD version (e.g., 0.0.11).
+You'll see the current PDD version (e.g., 0.0.12).

 Advanced Installation Tips
 --------------------------
pdd_cli-0.0.12.dist-info/RECORD CHANGED
@@ -6,7 +6,7 @@ pdd/bug_main.py,sha256=myKU9--QWdkV4Wf3mD2PoLPJFNgRjwf4z8s7TC28G_s,3720
 pdd/bug_to_unit_test.py,sha256=dsJNm6qAwx-m7RvFF5RquFJRzxzZGCWT4IKYnzVCUws,5569
 pdd/change.py,sha256=iqjWS5DrQ73yMkuUQlwIRIFlofmKdaK6t6-v3zHKL-4,4985
 pdd/change_main.py,sha256=yL_i1Ws5vt4vAkWiC826csNi2cHP6wKbwe_PfMqbbPY,11407
-pdd/cli.py,sha256=xUoeKxEJDJuASejFBJsYc9XdehRFVZmpqiXQvqml-1o,16593
+pdd/cli.py,sha256=Pw-bz_PIuarQNb4hORmgXupwKPGc5hH7xOklAtFatDo,16593
 pdd/cmd_test_main.py,sha256=aSCxRnSurg15AvPcJDAPp9xy8p_qqnjU1oV14Hi2R54,5301
 pdd/code_generator.py,sha256=n5akrX7VPe71X4RsD6kKqAVvzBLMlciJI4RtJA1PcgA,4375
 pdd/code_generator_main.py,sha256=G2eRBPXc1cGszkk0PbIPmJZHPaf_dw5d2yZbsvQZA3c,4793
@@ -23,7 +23,7 @@ pdd/detect_change_main.py,sha256=1Z4ymhjJaVr2aliGyqkqeqSmQ7QMgcl23p0wdsmBas0,365
 pdd/find_section.py,sha256=lz_FPY4KDCRAGlL1pWVZiutUNv7E4KsDFK-ymDWA_Ec,962
 pdd/fix_code_loop.py,sha256=L0yxq2yAziPIyFGb8lIP2mvufu8a_gtc5nnN2LuMuKs,8596
 pdd/fix_code_module_errors.py,sha256=M6AnlR2jF5LI-nNg6gIO5LvSkxiaLIUGyTvfnUfe1cU,4625
-pdd/fix_error_loop.py,sha256=EtjqF9e4DVFQ0hh8fsKGYMqYwmN24yOHtziPMZFcvrA,15889
+pdd/fix_error_loop.py,sha256=Ca8OPag4JHAR4QwaC4ntPgkdkVHtx1HNXynJrZr6tz4,18296
 pdd/fix_errors_from_unit_tests.py,sha256=8qCEyHZ6lUSBtV9vhQyhgAxDuhngmOy7vVy2HObckd0,8934
 pdd/fix_main.py,sha256=02OIViH12BcsykpDp4Osxw2ndEeThnNakMFkzdpYr48,5333
 pdd/generate_output_paths.py,sha256=zz42GTx9eGyWIYSl3jcWvtJRGnieC3eoPM6DIVcWz2k,7219
@@ -89,9 +89,9 @@ pdd/prompts/trim_results_start_LLM.prompt,sha256=WwFlOHha4wzMLtRHDMI6GtcNdl2toE8
 pdd/prompts/unfinished_prompt_LLM.prompt,sha256=-JgBpiPTQZdWOAwOG1XpfpD9waynFTAT3Jo84eQ4bTw,1543
 pdd/prompts/update_prompt_LLM.prompt,sha256=_lGaxeVP4oF8yGqiN6yj6UE0j79lxfGdjsYr5w5KSYk,1261
 pdd/prompts/xml_convertor_LLM.prompt,sha256=YGRGXJeg6EhM9690f-SKqQrKqSJjLFD51UrPOlO0Frg,2786
-pdd_cli-0.0.11.dist-info/LICENSE,sha256=-1bjYH-CEjGEQ8VixtnRYuu37kN6F9NxmZSDkBuUQ9o,1062
-pdd_cli-0.0.11.dist-info/METADATA,sha256=le4lAMSFfByKscN2rUhNUDi3IPInwYR3u41a5ZsfZfM,6808
-pdd_cli-0.0.11.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-pdd_cli-0.0.11.dist-info/entry_points.txt,sha256=Kr8HtNVb8uHZtQJNH4DnF8j7WNgWQbb7_Pw5hECSR-I,36
-pdd_cli-0.0.11.dist-info/top_level.txt,sha256=xjnhIACeMcMeDfVNREgQZl4EbTni2T11QkL5r7E-sbE,4
-pdd_cli-0.0.11.dist-info/RECORD,,
+pdd_cli-0.0.12.dist-info/LICENSE,sha256=-1bjYH-CEjGEQ8VixtnRYuu37kN6F9NxmZSDkBuUQ9o,1062
+pdd_cli-0.0.12.dist-info/METADATA,sha256=HCmK9fJ69EhzkytWSrBX1yeilwuAJPWNveli1foXF38,6808
+pdd_cli-0.0.12.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+pdd_cli-0.0.12.dist-info/entry_points.txt,sha256=Kr8HtNVb8uHZtQJNH4DnF8j7WNgWQbb7_Pw5hECSR-I,36
+pdd_cli-0.0.12.dist-info/top_level.txt,sha256=xjnhIACeMcMeDfVNREgQZl4EbTni2T11QkL5r7E-sbE,4
+pdd_cli-0.0.12.dist-info/RECORD,,