pdd-cli 0.0.39__py3-none-any.whl → 0.0.41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of pdd-cli might be problematic.

pdd/cli.py CHANGED
@@ -1,4 +1,10 @@
  # pdd/cli.py
+ """
+ Command Line Interface (CLI) for the PDD (Prompt-Driven Development) tool.
+
+ This module provides the main CLI functionality for PDD, including commands for
+ generating code, tests, fixing issues, and managing prompts.
+ """
  from __future__ import annotations

  import os
@@ -47,23 +53,23 @@ custom_theme = Theme({
  console = Console(theme=custom_theme)

  # --- Helper Function for Error Handling ---
- def handle_error(e: Exception, command_name: str, quiet: bool):
+ def handle_error(exception: Exception, command_name: str, quiet: bool):
  """Prints error messages using Rich console.""" # Modified docstring
  if not quiet:
  console.print(f"[error]Error during '{command_name}' command:[/error]", style="error")
- if isinstance(e, FileNotFoundError):
- console.print(f" [error]File not found:[/error] {e}", style="error")
- elif isinstance(e, (ValueError, IOError)):
- console.print(f" [error]Input/Output Error:[/error] {e}", style="error")
- elif isinstance(e, click.UsageError): # Handle Click usage errors explicitly if needed
- console.print(f" [error]Usage Error:[/error] {e}", style="error")
+ if isinstance(exception, FileNotFoundError):
+ console.print(f" [error]File not found:[/error] {exception}", style="error")
+ elif isinstance(exception, (ValueError, IOError)):
+ console.print(f" [error]Input/Output Error:[/error] {exception}", style="error")
+ elif isinstance(exception, click.UsageError): # Handle Click usage errors explicitly if needed
+ console.print(f" [error]Usage Error:[/error] {exception}", style="error")
  # click.UsageError should typically exit with 2, but we are handling it.
- elif isinstance(e, MarkupError):
+ elif isinstance(exception, MarkupError):
  console.print(" [error]Markup Error:[/error] Invalid Rich markup encountered.", style="error")
  # Print the error message safely escaped
- console.print(escape(str(e)))
+ console.print(escape(str(exception)))
  else:
- console.print(f" [error]An unexpected error occurred:[/error] {e}", style="error")
+ console.print(f" [error]An unexpected error occurred:[/error] {exception}", style="error")
  # Do NOT re-raise e here. Let the command function return None.


@@ -170,9 +176,12 @@ def cli(
  console.print("[info]Checking for updates...[/info]")
  # Removed quiet=quiet argument as it caused TypeError
  auto_update()
- except Exception as e:
+ except Exception as exception: # Using more descriptive name
  if not quiet:
- console.print(f"[warning]Auto-update check failed:[/warning] {e}", style="warning")
+ console.print(
+ f"[warning]Auto-update check failed:[/warning] {exception}",
+ style="warning"
+ )

  # --- Result Callback for Chained Commands ---
  @cli.result_callback()
@@ -200,45 +209,45 @@ def process_commands(ctx: click.Context, results: List[Optional[Tuple[Any, float

  # Check if the command failed (returned None)
  if result_tuple is None:
- if not ctx.obj.get("quiet"):
- # Check if it was install_completion (which normally returns None)
- if command_name == "install_completion":
- console.print(f" [info]Step {i+1} ({command_name}):[/info] Command completed.")
- # If command name is unknown, and it might be install_completion which prints its own status
- elif command_name.startswith("Unknown Command"):
- console.print(f" [info]Step {i+1} ({command_name}):[/info] Command executed (see output above for status details).")
- # Check if it was preprocess (which returns a dummy tuple on success)
- # This case handles actual failure for preprocess
- elif command_name == "preprocess":
- console.print(f" [error]Step {i+1} ({command_name}):[/error] Command failed.")
- else:
- console.print(f" [error]Step {i+1} ({command_name}):[/error] Command failed.")
+ if not ctx.obj.get("quiet"):
+ # Check if it was install_completion (which normally returns None)
+ if command_name == "install_completion":
+ console.print(f" [info]Step {i+1} ({command_name}):[/info] Command completed.")
+ # If command name is unknown, and it might be install_completion which prints its own status
+ elif command_name.startswith("Unknown Command"):
+ console.print(f" [info]Step {i+1} ({command_name}):[/info] Command executed (see output above for status details).")
+ # Check if it was preprocess (which returns a dummy tuple on success)
+ # This case handles actual failure for preprocess
+ elif command_name == "preprocess":
+ console.print(f" [error]Step {i+1} ({command_name}):[/error] Command failed.")
+ else:
+ console.print(f" [error]Step {i+1} ({command_name}):[/error] Command failed.")
  # Check if the result is the expected tuple structure from @track_cost or preprocess success
  elif isinstance(result_tuple, tuple) and len(result_tuple) == 3:
  _result_data, cost, model_name = result_tuple
  total_chain_cost += cost
  if not ctx.obj.get("quiet"):
- # Special handling for preprocess success message (check actual command name)
- actual_command_name = invoked_subcommands[i] if i < num_commands else None # Get actual name if possible
- if actual_command_name == "preprocess" and cost == 0.0 and model_name == "local":
- console.print(f" [info]Step {i+1} ({command_name}):[/info] Command completed (local).")
- else:
- # Generic output using potentially "Unknown Command" name
- console.print(f" [info]Step {i+1} ({command_name}):[/info] Cost: ${cost:.6f}, Model: {model_name}")
+ # Special handling for preprocess success message (check actual command name)
+ actual_command_name = invoked_subcommands[i] if i < num_commands else None # Get actual name if possible
+ if actual_command_name == "preprocess" and cost == 0.0 and model_name == "local":
+ console.print(f" [info]Step {i+1} ({command_name}):[/info] Command completed (local).")
+ else:
+ # Generic output using potentially "Unknown Command" name
+ console.print(f" [info]Step {i+1} ({command_name}):[/info] Cost: ${cost:.6f}, Model: {model_name}")
  else:
  # Handle unexpected return types if necessary
  if not ctx.obj.get("quiet"):
- # Provide more detail on the unexpected type
- console.print(f" [warning]Step {i+1} ({command_name}):[/warning] Unexpected result format: {type(result_tuple).__name__} - {str(result_tuple)[:50]}...")
+ # Provide more detail on the unexpected type
+ console.print(f" [warning]Step {i+1} ({command_name}):[/warning] Unexpected result format: {type(result_tuple).__name__} - {str(result_tuple)[:50]}...")


  if not ctx.obj.get("quiet"):
  # Only print total cost if at least one command potentially contributed cost
  if any(res is not None and isinstance(res, tuple) and len(res) == 3 for res in results):
- console.print(f"[info]Total Estimated Cost for Chain:[/info] ${total_chain_cost:.6f}")
+ console.print(f"[info]Total Estimated Cost for Chain:[/info] ${total_chain_cost:.6f}")
  # Indicate if the chain might have been incomplete due to errors
  if num_results < num_commands and not all(res is None for res in results): # Avoid printing if all failed
- console.print("[warning]Note: Chain may have terminated early due to errors.[/warning]")
+ console.print("[warning]Note: Chain may have terminated early due to errors.[/warning]")
  console.print("[info]-------------------------------------[/info]")


@@ -248,7 +257,7 @@ def process_commands(ctx: click.Context, results: List[Optional[Tuple[Any, float
  @click.argument("prompt_file", type=click.Path(exists=True, dir_okay=False))
  @click.option(
  "--output",
- type=click.Path(writable=True), # Allows file or dir
+ type=click.Path(writable=True),
  default=None,
  help="Specify where to save the generated code (file or directory).",
  )
@@ -274,10 +283,8 @@ def generate(
  output: Optional[str],
  original_prompt_file_path: Optional[str],
  force_incremental_flag: bool,
- ) -> Optional[Tuple[str, float, str]]: # Modified return type
- """Create runnable code from a prompt file."""
- quiet = ctx.obj.get("quiet", False)
- command_name = "generate"
+ ) -> Optional[Tuple[str, float, str]]:
+ """Generate code from a prompt file."""
  try:
  generated_code, incremental, total_cost, model_name = code_generator_main(
  ctx=ctx,
@@ -287,9 +294,9 @@ def generate(
  force_incremental_flag=force_incremental_flag,
  )
  return generated_code, total_cost, model_name
- except Exception as e:
- handle_error(e, command_name, quiet)
- return None # Return None on failure
+ except Exception as exception:
+ handle_error(exception, "generate", ctx.obj.get("quiet", False))
+ return None


  @cli.command("example")
@@ -303,10 +310,13 @@ def generate(
  )
  @click.pass_context
  @track_cost
- def example(ctx: click.Context, prompt_file: str, code_file: str, output: Optional[str]) -> Optional[Tuple[str, float, str]]: # Modified return type
- """Create a compact example demonstrating functionality."""
- quiet = ctx.obj.get("quiet", False)
- command_name = "example"
+ def example(
+ ctx: click.Context,
+ prompt_file: str,
+ code_file: str,
+ output: Optional[str]
+ ) -> Optional[Tuple[str, float, str]]:
+ """Generate example code for a given prompt and implementation."""
  try:
  example_code, total_cost, model_name = context_generator_main(
  ctx=ctx,
@@ -315,9 +325,9 @@ def example(ctx: click.Context, prompt_file: str, code_file: str, output: Option
  output=output,
  )
  return example_code, total_cost, model_name
- except Exception as e:
- handle_error(e, command_name, quiet)
- return None # Return None on failure
+ except Exception as exception:
+ handle_error(exception, "example", ctx.obj.get("quiet", False))
+ return None


  @cli.command("test")
@@ -329,7 +339,12 @@ def example(ctx: click.Context, prompt_file: str, code_file: str, output: Option
  default=None,
  help="Specify where to save the generated test file (file or directory).",
  )
- @click.option("--language", type=str, default=None, help="Specify the programming language.")
+ @click.option(
+ "--language",
+ type=str,
+ default=None,
+ help="Specify the programming language."
+ )
  @click.option(
  "--coverage-report",
  type=click.Path(exists=True, dir_okay=False),
@@ -345,7 +360,7 @@ def example(ctx: click.Context, prompt_file: str, code_file: str, output: Option
  @click.option(
  "--target-coverage",
  type=click.FloatRange(0.0, 100.0),
- default=None, # Use None, default handled in cmd_test_main or env var
+ default=None, # Use None, default handled in cmd_test_main or env var
  help="Desired code coverage percentage (default: 90.0 or PDD_TEST_COVERAGE_TARGET).",
  )
  @click.option(
@@ -366,12 +381,10 @@ def test(
  existing_tests: Optional[str],
  target_coverage: Optional[float],
  merge: bool,
- ) -> Optional[Tuple[str, float, str]]: # Modified return type
- """Generate or enhance unit tests."""
- quiet = ctx.obj.get("quiet", False)
- command_name = "test"
+ ) -> Optional[Tuple[str, float, str]]:
+ """Generate unit tests for a given prompt and implementation."""
  try:
- generated_test_code, total_cost, model_name = cmd_test_main(
+ test_code, total_cost, model_name = cmd_test_main(
  ctx=ctx,
  prompt_file=prompt_file,
  code_file=code_file,
@@ -382,10 +395,10 @@ def test(
  target_coverage=target_coverage,
  merge=merge,
  )
- return generated_test_code, total_cost, model_name
- except Exception as e:
- handle_error(e, command_name, quiet)
- return None # Return None on failure
+ return test_code, total_cost, model_name
+ except Exception as exception:
+ handle_error(exception, "test", ctx.obj.get("quiet", False))
+ return None


  @cli.command("preprocess")
@@ -430,12 +443,12 @@ def preprocess(
  recursive: bool,
  double: bool,
  exclude: Optional[Tuple[str, ...]],
- ) -> Optional[Tuple[str, float, str]]: # Modified return type (Optional)
- """Preprocess prompt files and save the results."""
- quiet = ctx.obj.get("quiet", False)
- command_name = "preprocess"
+ ) -> Optional[Tuple[str, float, str]]:
+ """Preprocess a prompt file to prepare it for LLM use."""
  try:
- preprocess_main(
+ # Since preprocess is a local operation, we don't track cost
+ # But we need to return a tuple in the expected format for result callback
+ result = preprocess_main(
  ctx=ctx,
  prompt_file=prompt_file,
  output=output,
@@ -444,18 +457,25 @@ def preprocess(
  recursive=recursive,
  double=double,
  exclude=list(exclude) if exclude else [],
- # Return dummy values ONLY on success
- return "Preprocessing complete.", 0.0, "local"
- except Exception as e:
- handle_error(e, command_name, quiet)
- return None # Return None on failure
+
+ # Handle the result from preprocess_main
+ if result is None:
+ # If preprocess_main returns None, still return a dummy tuple for the callback
+ return "", 0.0, "local"
+ else:
+ # Unpack the return value from preprocess_main
+ processed_prompt, total_cost, model_name = result
+ return processed_prompt, total_cost, model_name
+ except Exception as exception:
+ handle_error(exception, "preprocess", ctx.obj.get("quiet", False))
+ return None


  @cli.command("fix")
  @click.argument("prompt_file", type=click.Path(exists=True, dir_okay=False))
  @click.argument("code_file", type=click.Path(exists=True, dir_okay=False))
  @click.argument("unit_test_file", type=click.Path(exists=True, dir_okay=False))
- @click.argument("error_file", type=click.Path(dir_okay=False)) # Allow non-existent for loop mode
+ @click.argument("error_file", type=click.Path(dir_okay=False)) # Allow non-existent for loop mode
  @click.option(
  "--output-test",
  type=click.Path(writable=True),
@@ -474,7 +494,12 @@ def preprocess(
  default=None,
  help="Specify where to save the results log (file or directory).",
  )
- @click.option("--loop", is_flag=True, default=False, help="Enable iterative fixing process.")
+ @click.option(
+ "--loop",
+ is_flag=True,
+ default=False,
+ help="Enable iterative fixing process."
+ )
  @click.option(
  "--verification-program",
  type=click.Path(exists=True, dir_okay=False),
@@ -502,7 +527,7 @@ def preprocess(
  help="Automatically submit the example if all unit tests pass.",
  )
  @click.pass_context
- @track_cost # fix_main returns cost/model info
+ @track_cost
  def fix(
  ctx: click.Context,
  prompt_file: str,
@@ -517,15 +542,11 @@ def fix(
  max_attempts: int,
  budget: float,
  auto_submit: bool,
- ) -> Optional[Tuple[Dict[str, Any], float, str]]: # Modified return type
- """Fix errors in code and unit tests based on error messages."""
- quiet = ctx.obj.get("quiet", False)
- command_name = "fix"
+ ) -> Optional[Tuple[Dict[str, Any], float, str]]:
+ """Fix code based on a prompt and unit test errors."""
  try:
-
- # fix_main returns: success, fixed_test_content, fixed_code_content, attempts, cost, model
- # We need to adapt this to the (result, cost, model) structure for the callback
- success, fixed_test, fixed_code, attempts, cost, model = fix_main(
+ # The actual logic is in fix_main
+ success, fixed_unit_test, fixed_code, attempts, total_cost, model_name = fix_main(
  ctx=ctx,
  prompt_file=prompt_file,
  code_file=code_file,
@@ -540,18 +561,16 @@ def fix(
  budget=budget,
  auto_submit=auto_submit,
  )
- # Package results into a dictionary for the first element of the tuple
- result_data = {
+ result = {
  "success": success,
+ "fixed_unit_test": fixed_unit_test,
+ "fixed_code": fixed_code,
  "attempts": attempts,
- "fixed_test_path": output_test,
- "fixed_code_path": output_code,
- "results_log_path": output_results,
  }
- return result_data, cost, model
- except Exception as e:
- handle_error(e, command_name, quiet)
- return None # Return None on failure
+ return result, total_cost, model_name
+ except Exception as exception:
+ handle_error(exception, "fix", ctx.obj.get("quiet", False))
+ return None


  @cli.command("split")
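
Taken together, the pdd/cli.py hunks standardize one contract: every chained command decorated with @track_cost returns a (result, cost, model_name) tuple on success and None on failure, and the result callback totals the cost elements. A minimal sketch of that contract (the helper, numbers, and strings below are illustrative, not package code):

```python
from typing import Any, List, Optional, Tuple

CommandResult = Optional[Tuple[Any, float, str]]  # (result, cost, model_name)

def total_chain_cost(results: List[CommandResult]) -> float:
    """Sum the cost element of each well-formed result tuple, as the
    result callback in pdd/cli.py does for a chain of commands."""
    return sum(r[1] for r in results if isinstance(r, tuple) and len(r) == 3)

results: List[CommandResult] = [
    ("generated code", 0.001234, "gpt-4.1"),  # successful LLM-backed command
    None,                                     # failed command: handle_error already printed
    ("", 0.0, "local"),                       # local preprocess step: dummy tuple
]
assert total_chain_cost(results) == 0.001234
```
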
pdd/cmd_test_main.py CHANGED
@@ -1,24 +1,28 @@
- from typing import Optional, Tuple
+ """
+ Main entry point for the 'test' command.
+ """
+ from __future__ import annotations
  import click
+ # pylint: disable=redefined-builtin
  from rich import print
- from rich.progress import track
- import os

  from .construct_paths import construct_paths
  from .generate_test import generate_test
  from .increase_tests import increase_tests

+
+ # pylint: disable=too-many-arguments, too-many-locals, too-many-return-statements, too-many-branches, too-many-statements, broad-except
  def cmd_test_main(
  ctx: click.Context,
  prompt_file: str,
  code_file: str,
- output: Optional[str],
- language: Optional[str],
- coverage_report: Optional[str],
- existing_tests: Optional[str],
- target_coverage: Optional[float],
- merge: Optional[bool],
- ) -> Tuple[str, float, str]:
+ output: str | None,
+ language: str | None,
+ coverage_report: str | None,
+ existing_tests: str | None,
+ target_coverage: float | None,
+ merge: bool | None,
+ ) -> tuple[str, float, str]:
  """
  CLI wrapper for generating or enhancing unit tests.

@@ -29,15 +33,15 @@ def cmd_test_main(
  ctx (click.Context): The Click context object.
  prompt_file (str): Path to the prompt file.
  code_file (str): Path to the code file.
- output (Optional[str]): Path to save the generated test file.
- language (Optional[str]): Programming language.
- coverage_report (Optional[str]): Path to the coverage report file.
- existing_tests (Optional[str]): Path to the existing unit test file.
- target_coverage (Optional[float]): Desired code coverage percentage.
- merge (Optional[bool]): Whether to merge new tests with existing tests.
+ output (str | None): Path to save the generated test file.
+ language (str | None): Programming language.
+ coverage_report (str | None): Path to the coverage report file.
+ existing_tests (str | None): Path to the existing unit test file.
+ target_coverage (float | None): Desired code coverage percentage.
+ merge (bool | None): Whether to merge new tests with existing tests.

  Returns:
- Tuple[str, float, str]: Generated unit test code, total cost, and model name.
+ tuple[str, float, str]: Generated unit test code, total cost, and model name.
  """
  # Initialize variables
  unit_test = ""
@@ -84,8 +88,11 @@ def cmd_test_main(
  command="test",
  command_options=command_options,
  )
- except Exception as e:
- print(f"[bold red]Error constructing paths: {e}[/bold red]")
+ except Exception as exception:
+ # Catching a general exception is necessary here to handle a wide range of
+ # potential errors during file I/O and path construction, ensuring the
+ # CLI remains robust.
+ print(f"[bold red]Error constructing paths: {exception}[/bold red]")
  ctx.exit(1)
  return "", 0.0, ""

@@ -98,19 +105,24 @@ def cmd_test_main(
  unit_test, total_cost, model_name = generate_test(
  input_strings["prompt_file"],
  input_strings["code_file"],
- strength,
- temperature,
- language,
+ strength=strength,
+ temperature=temperature,
  time=time,
+ language=language,
+ verbose=verbose,
  )
- except Exception as e:
- print(f"[bold red]Error generating tests: {e}[/bold red]")
+ except Exception as exception:
+ # A general exception is caught to handle various errors that can occur
+ # during the test generation process, which involves external model
+ # interactions and complex logic.
+ print(f"[bold red]Error generating tests: {exception}[/bold red]")
  ctx.exit(1)
  return "", 0.0, ""
  else:
  if not existing_tests:
  print(
- "[bold red]Error: --existing-tests is required when using --coverage-report[/bold red]"
+ "[bold red]Error: --existing-tests is required "
+ "when using --coverage-report[/bold red]"
  )
  ctx.exit(1)
  return "", 0.0, ""
@@ -126,8 +138,11 @@ def cmd_test_main(
  time=time,
  verbose=verbose,
  )
- except Exception as e:
- print(f"[bold red]Error increasing test coverage: {e}[/bold red]")
+ except Exception as exception:
+ # This broad exception is used to catch any issue that might arise
+ # while increasing test coverage, including problems with parsing
+ # reports or interacting with the language model.
+ print(f"[bold red]Error increasing test coverage: {exception}[/bold red]")
  ctx.exit(1)
  return "", 0.0, ""

@@ -141,13 +156,14 @@ def cmd_test_main(
  ctx.exit(1)
  return "", 0.0, ""
  try:
- with open(output_file, "w") as f:
- f.write(unit_test)
- print(
- f"[bold green]Unit tests saved to:[/bold green] {output_file}"
- )
- except Exception as e:
- print(f"[bold red]Error saving tests to file: {e}[/bold red]")
+ with open(output_file, "w", encoding="utf-8") as file_handle:
+ file_handle.write(unit_test)
+ print(f"[bold green]Unit tests saved to:[/bold green] {output_file}")
+ except Exception as exception:
+ # A broad exception is caught here to handle potential file system errors
+ # (e.g., permissions, disk space) that can occur when writing the
+ # output file, preventing the program from crashing unexpectedly.
+ print(f"[bold red]Error saving tests to file: {exception}[/bold red]")
  ctx.exit(1)
  return "", 0.0, ""

@@ -155,4 +171,4 @@ def cmd_test_main(
  print(f"[bold blue]Total cost:[/bold blue] ${total_cost:.6f}")
  print(f"[bold blue]Model used:[/bold blue] {model_name}")

- return unit_test, total_cost, model_name
+ return unit_test, total_cost, model_name
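
The generate_test call above switches from positional to keyword arguments, which is the substantive fix in this file: with several optional parameters of similar types, a positional call can silently bind a value to the wrong parameter. A hedged sketch of why keyword calls are safer, using a hypothetical signature (the real generate_test signature is not visible in this diff):

```python
from __future__ import annotations

# Hypothetical signature for illustration only.
def generate_test(prompt: str, code: str, *, strength: float = 0.5,
                  temperature: float = 0.0, time: float | None = None,
                  language: str | None = None, verbose: bool = False) -> str:
    return f"# tests ({language or 'auto-detected'})"

# Keyword arguments bind each value explicitly, so argument order no longer
# matters and an inserted parameter cannot silently shift positional bindings
# (the old call passed strength, temperature, and language positionally).
print(generate_test("prompt text", "code text",
                    strength=0.7, temperature=0.2, language="python", verbose=True))
```
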
pdd/crash_main.py CHANGED
@@ -131,22 +131,23 @@ def crash_main(
  # Removed fallback to original content if final_code/final_program are empty
  # An empty string from a fix function means no valid update.

- # Determine whether to write the files based on whether paths are provided AND content was updated
+ # Determine whether to write the files based on whether paths are provided
  output_code_path_str = output_file_paths.get("output")
  output_program_path_str = output_file_paths.get("output_program")

- # Write output files only if updated and path provided
- if output_code_path_str and code_updated:
+ # Write output files if path provided (always write for regression compatibility)
+ # Use fixed content if available, otherwise use original content
+ if output_code_path_str:
  output_code_path = Path(output_code_path_str)
  output_code_path.parent.mkdir(parents=True, exist_ok=True) # Ensure directory exists
  with open(output_code_path, "w") as f:
- f.write(final_code)
+ f.write(final_code if final_code else original_code_content)

- if output_program_path_str and program_updated:
+ if output_program_path_str:
  output_program_path = Path(output_program_path_str)
  output_program_path.parent.mkdir(parents=True, exist_ok=True) # Ensure directory exists
  with open(output_program_path, "w") as f:
- f.write(final_program)
+ f.write(final_program if final_program else original_program_content)

  # Provide user feedback
  if not quiet:
@@ -162,13 +163,13 @@ def crash_main(
  if code_updated:
  rprint(f"[bold]Fixed code saved to:[/bold] {output_code_path_str}")
  else:
- rprint(f"[info]Code file {Path(code_file).name} was not modified. Output file {output_code_path_str} not written.[/info]")
+ rprint(f"[info]Code file {Path(code_file).name} was not modified. Original content saved to {output_code_path_str}.[/info]")

  if output_program_path_str:
  if program_updated:
  rprint(f"[bold]Fixed program saved to:[/bold] {output_program_path_str}")
  else:
- rprint(f"[info]Program file {Path(program_file).name} was not modified. Output file {output_program_path_str} not written.[/info]")
+ rprint(f"[info]Program file {Path(program_file).name} was not modified. Original content saved to {output_program_path_str}.[/info]")

  return success, final_code, final_program, attempts, cost, model

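The behavioural change in crash_main: previously an output file was written only when the fixer actually changed the content; now any provided output path always receives a file, falling back to the original content when no fix was produced. A simplified sketch of the new write path (not the package's actual function, which takes many more parameters and prints the feedback shown above):

```python
from __future__ import annotations
from pathlib import Path

def write_output(path_str: str | None, fixed: str, original: str) -> None:
    if not path_str:
        return  # no output path requested
    path = Path(path_str)
    path.parent.mkdir(parents=True, exist_ok=True)  # ensure directory exists
    # Fall back to the original content when the fixer returned an empty string.
    path.write_text(fixed if fixed else original)
```
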
pdd/data/llm_model.csv CHANGED
@@ -6,7 +6,7 @@ OpenAI,deepseek/deepseek-chat,.27,1.1,1353,https://api.deepseek.com/beta,DEEPSEE
  Google,vertex_ai/gemini-2.5-flash-preview-04-17,0.15,0.6,1330,,VERTEX_CREDENTIALS,0,True,effort
  Google,gemini-2.5-pro-exp-03-25,1.25,10.0,1360,,GOOGLE_API_KEY,0,True,none
  Anthropic,claude-sonnet-4-20250514,3.0,15.0,1340,,ANTHROPIC_API_KEY,64000,True,budget
- Google,vertex_ai/gemini-2.5-pro-preview-05-06,1.25,10.0,1361,,VERTEX_CREDENTIALS,0,True,none
+ Google,vertex_ai/gemini-2.5-pro,1.25,10.0,1361,,VERTEX_CREDENTIALS,0,True,none
  OpenAI,o4-mini,1.1,4.4,1333,,OPENAI_API_KEY,0,True,effort
  OpenAI,o3,10.0,40.0,1389,,OPENAI_API_KEY,0,True,effort
  OpenAI,gpt-4.1,2.0,8.0,1335,,OPENAI_API_KEY,0,True,none

pdd/fix_verification_errors.py CHANGED
@@ -268,6 +268,13 @@ def fix_verification_errors(
  fixed_program = fix_result_obj.fixed_program
  fixed_code = fix_result_obj.fixed_code
  fix_explanation = fix_result_obj.explanation
+
+ # Unescape literal \n strings to actual newlines
+ if fixed_program:
+ fixed_program = fixed_program.replace('\\n', '\n')
+ if fixed_code:
+ fixed_code = fixed_code.replace('\\n', '\n')
+
  parsed_fix_successfully = True
  if verbose:
  rprint("[green]Successfully parsed structured output for fix.[/green]")
@@ -282,6 +289,12 @@ def fix_verification_errors(
  fixed_code_candidate = code_match.group(1).strip() if (code_match and code_match.group(1)) else None
  fix_explanation_candidate = explanation_match.group(1).strip() if (explanation_match and explanation_match.group(1)) else None

+ # Unescape literal \n strings to actual newlines
+ if fixed_program_candidate:
+ fixed_program_candidate = fixed_program_candidate.replace('\\n', '\n')
+ if fixed_code_candidate:
+ fixed_code_candidate = fixed_code_candidate.replace('\\n', '\n')
+
  fixed_program = fixed_program_candidate if fixed_program_candidate else program
  fixed_code = fixed_code_candidate if fixed_code_candidate else code
  fix_explanation = fix_explanation_candidate if fix_explanation_candidate else "[Fix explanation not provided by LLM]"

pdd/fix_verification_main.py CHANGED
@@ -382,7 +382,7 @@ def fix_verification_main(
  if final_code is not None:
  rich_print(f" len(final_code): {len(final_code)}")

- if success and output_code_path and final_code is not None:
+ if output_code_path and final_code is not None:
  try:
  if verbose:
  rich_print(f"[cyan bold DEBUG] In fix_verification_main, ATTEMPTING to write code to: {output_code_path!r}")
@@ -402,7 +402,7 @@ def fix_verification_main(
  if final_program is not None:
  rich_print(f" len(final_program): {len(final_program)}")

- if success and output_program_path and final_program is not None:
+ if output_program_path and final_program is not None:
  try:
  if verbose:
  rich_print(f"[cyan bold DEBUG] In fix_verification_main, ATTEMPTING to write program to: {output_program_path!r}")