pdd-cli 0.0.40__py3-none-any.whl → 0.0.42__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pdd/__init__.py +1 -1
- pdd/auto_deps_main.py +1 -1
- pdd/auto_update.py +73 -78
- pdd/bug_main.py +3 -3
- pdd/bug_to_unit_test.py +46 -38
- pdd/change.py +20 -13
- pdd/change_main.py +223 -163
- pdd/cli.py +192 -95
- pdd/cmd_test_main.py +51 -36
- pdd/code_generator_main.py +3 -2
- pdd/conflicts_main.py +1 -1
- pdd/construct_paths.py +221 -19
- pdd/context_generator_main.py +27 -12
- pdd/crash_main.py +44 -50
- pdd/data/llm_model.csv +1 -1
- pdd/detect_change_main.py +1 -1
- pdd/fix_code_module_errors.py +12 -0
- pdd/fix_main.py +2 -2
- pdd/fix_verification_errors.py +13 -0
- pdd/fix_verification_main.py +3 -3
- pdd/generate_output_paths.py +113 -21
- pdd/generate_test.py +53 -16
- pdd/llm_invoke.py +162 -0
- pdd/logo_animation.py +455 -0
- pdd/preprocess_main.py +1 -1
- pdd/process_csv_change.py +1 -1
- pdd/prompts/extract_program_code_fix_LLM.prompt +2 -1
- pdd/prompts/sync_analysis_LLM.prompt +82 -0
- pdd/split_main.py +1 -1
- pdd/sync_animation.py +643 -0
- pdd/sync_determine_operation.py +1039 -0
- pdd/sync_main.py +333 -0
- pdd/sync_orchestration.py +639 -0
- pdd/trace_main.py +1 -1
- pdd/update_main.py +7 -2
- pdd/xml_tagger.py +15 -6
- pdd_cli-0.0.42.dist-info/METADATA +307 -0
- {pdd_cli-0.0.40.dist-info → pdd_cli-0.0.42.dist-info}/RECORD +42 -36
- pdd_cli-0.0.40.dist-info/METADATA +0 -269
- {pdd_cli-0.0.40.dist-info → pdd_cli-0.0.42.dist-info}/WHEEL +0 -0
- {pdd_cli-0.0.40.dist-info → pdd_cli-0.0.42.dist-info}/entry_points.txt +0 -0
- {pdd_cli-0.0.40.dist-info → pdd_cli-0.0.42.dist-info}/licenses/LICENSE +0 -0
- {pdd_cli-0.0.40.dist-info → pdd_cli-0.0.42.dist-info}/top_level.txt +0 -0
pdd/cli.py
CHANGED

@@ -1,4 +1,10 @@
 # pdd/cli.py
+"""
+Command Line Interface (CLI) for the PDD (Prompt-Driven Development) tool.
+
+This module provides the main CLI functionality for PDD, including commands for
+generating code, tests, fixing issues, and managing prompts.
+"""
 from __future__ import annotations
 
 import os

@@ -29,6 +35,7 @@ from .fix_verification_main import fix_verification_main
 from .install_completion import install_completion, get_local_pdd_path
 from .preprocess_main import preprocess_main
 from .split_main import split_main
+from .sync_main import sync_main
 from .trace_main import trace_main
 from .track_cost import track_cost
 from .update_main import update_main

@@ -47,23 +54,23 @@ custom_theme = Theme({
 console = Console(theme=custom_theme)
 
 # --- Helper Function for Error Handling ---
-def handle_error(e: Exception, command_name: str, quiet: bool):
+def handle_error(exception: Exception, command_name: str, quiet: bool):
     """Prints error messages using Rich console."""  # Modified docstring
     if not quiet:
         console.print(f"[error]Error during '{command_name}' command:[/error]", style="error")
-        if isinstance(e, FileNotFoundError):
-            console.print(f" [error]File not found:[/error] {e}", style="error")
-        elif isinstance(e, (ValueError, IOError)):
-            console.print(f" [error]Input/Output Error:[/error] {e}", style="error")
-        elif isinstance(e, click.UsageError):
-            console.print(f" [error]Usage Error:[/error] {e}", style="error")
+        if isinstance(exception, FileNotFoundError):
+            console.print(f" [error]File not found:[/error] {exception}", style="error")
+        elif isinstance(exception, (ValueError, IOError)):
+            console.print(f" [error]Input/Output Error:[/error] {exception}", style="error")
+        elif isinstance(exception, click.UsageError):  # Handle Click usage errors explicitly if needed
+            console.print(f" [error]Usage Error:[/error] {exception}", style="error")
         # click.UsageError should typically exit with 2, but we are handling it.
-        elif isinstance(e, MarkupError):
+        elif isinstance(exception, MarkupError):
             console.print(" [error]Markup Error:[/error] Invalid Rich markup encountered.", style="error")
             # Print the error message safely escaped
-            console.print(escape(str(e)))
+            console.print(escape(str(exception)))
         else:
-            console.print(f" [error]An unexpected error occurred:[/error] {e}", style="error")
+            console.print(f" [error]An unexpected error occurred:[/error] {exception}", style="error")
     # Do NOT re-raise e here. Let the command function return None.

@@ -170,9 +177,12 @@ def cli(
         console.print("[info]Checking for updates...[/info]")
         # Removed quiet=quiet argument as it caused TypeError
         auto_update()
-    except Exception as e:
+    except Exception as exception:  # Using more descriptive name
         if not quiet:
-            console.print(f"[warning]Auto-update check failed:[/warning] {e}", style="warning")
+            console.print(
+                f"[warning]Auto-update check failed:[/warning] {exception}",
+                style="warning"
+            )
 
 # --- Result Callback for Chained Commands ---
 @cli.result_callback()
@@ -200,45 +210,45 @@ def process_commands(ctx: click.Context, results: List[Optional[Tuple[Any, float
 
         # Check if the command failed (returned None)
         if result_tuple is None:
-            … (13 lines truncated in the diff view)
+            if not ctx.obj.get("quiet"):
+                # Check if it was install_completion (which normally returns None)
+                if command_name == "install_completion":
+                    console.print(f" [info]Step {i+1} ({command_name}):[/info] Command completed.")
+                # If command name is unknown, and it might be install_completion which prints its own status
+                elif command_name.startswith("Unknown Command"):
+                    console.print(f" [info]Step {i+1} ({command_name}):[/info] Command executed (see output above for status details).")
+                # Check if it was preprocess (which returns a dummy tuple on success)
+                # This case handles actual failure for preprocess
+                elif command_name == "preprocess":
+                    console.print(f" [error]Step {i+1} ({command_name}):[/error] Command failed.")
+                else:
+                    console.print(f" [error]Step {i+1} ({command_name}):[/error] Command failed.")
         # Check if the result is the expected tuple structure from @track_cost or preprocess success
         elif isinstance(result_tuple, tuple) and len(result_tuple) == 3:
             _result_data, cost, model_name = result_tuple
             total_chain_cost += cost
             if not ctx.obj.get("quiet"):
-                … (7 lines truncated in the diff view)
+                # Special handling for preprocess success message (check actual command name)
+                actual_command_name = invoked_subcommands[i] if i < num_commands else None  # Get actual name if possible
+                if actual_command_name == "preprocess" and cost == 0.0 and model_name == "local":
+                    console.print(f" [info]Step {i+1} ({command_name}):[/info] Command completed (local).")
+                else:
+                    # Generic output using potentially "Unknown Command" name
+                    console.print(f" [info]Step {i+1} ({command_name}):[/info] Cost: ${cost:.6f}, Model: {model_name}")
         else:
             # Handle unexpected return types if necessary
             if not ctx.obj.get("quiet"):
-                … (2 lines truncated in the diff view)
+                # Provide more detail on the unexpected type
+                console.print(f" [warning]Step {i+1} ({command_name}):[/warning] Unexpected result format: {type(result_tuple).__name__} - {str(result_tuple)[:50]}...")
 
     if not ctx.obj.get("quiet"):
         # Only print total cost if at least one command potentially contributed cost
         if any(res is not None and isinstance(res, tuple) and len(res) == 3 for res in results):
-            … (1 line truncated in the diff view)
+            console.print(f"[info]Total Estimated Cost for Chain:[/info] ${total_chain_cost:.6f}")
         # Indicate if the chain might have been incomplete due to errors
         if num_results < num_commands and not all(res is None for res in results):  # Avoid printing if all failed
-            … (1 line truncated in the diff view)
+            console.print("[warning]Note: Chain may have terminated early due to errors.[/warning]")
         console.print("[info]-------------------------------------[/info]")
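The process_commands hook above is Click's chained-group result callback: with chain=True, every subcommand's return value is collected into a list that Click hands to the function registered via @cli.result_callback(). That is the mechanism PDD uses to aggregate per-step cost and to detect failed steps, which signal failure by returning None. Below is a minimal, self-contained sketch of that mechanism; all names are illustrative and not part of PDD:

    # Minimal sketch of Click's chained-group result callback.
    import click

    @click.group(chain=True)  # chain=True lets users run: demo step-a step-b
    def demo():
        pass

    @demo.command("step-a")
    def step_a():
        return ("a-result", 0.01, "model-x")  # (data, cost, model) tuple

    @demo.command("step-b")
    def step_b():
        return None  # None signals failure to the callback

    @demo.result_callback()
    def report(results):
        # Click passes one entry per invoked subcommand, in order.
        total = sum(r[1] for r in results if isinstance(r, tuple) and len(r) == 3)
        click.echo(f"total cost: ${total:.6f}")

    if __name__ == "__main__":
        demo()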
@@ -248,7 +258,7 @@ def process_commands(ctx: click.Context, results: List[Optional[Tuple[Any, float
 @click.argument("prompt_file", type=click.Path(exists=True, dir_okay=False))
 @click.option(
     "--output",
-    type=click.Path(writable=True),
+    type=click.Path(writable=True),
     default=None,
     help="Specify where to save the generated code (file or directory).",
 )

@@ -274,10 +284,8 @@ def generate(
     output: Optional[str],
     original_prompt_file_path: Optional[str],
     force_incremental_flag: bool,
-) -> Optional[Tuple[str, float, str]]:
-    """…
-    quiet = ctx.obj.get("quiet", False)
-    command_name = "generate"
+) -> Optional[Tuple[str, float, str]]:
+    """Generate code from a prompt file."""
     try:
         generated_code, incremental, total_cost, model_name = code_generator_main(
             ctx=ctx,

@@ -287,9 +295,9 @@ def generate(
             force_incremental_flag=force_incremental_flag,
         )
         return generated_code, total_cost, model_name
-    except Exception as e:
-        handle_error(e, command_name, quiet)
-        return None
+    except Exception as exception:
+        handle_error(exception, "generate", ctx.obj.get("quiet", False))
+        return None

@@ -303,10 +311,13 @@ def generate(
 )
 @click.pass_context
 @track_cost
-def example(ctx: click.Context, prompt_file: str, code_file: str, output: Optional[str]) -> …
-… (3 lines truncated in the diff view)
+def example(
+    ctx: click.Context,
+    prompt_file: str,
+    code_file: str,
+    output: Optional[str]
+) -> Optional[Tuple[str, float, str]]:
+    """Generate example code for a given prompt and implementation."""
     try:
         example_code, total_cost, model_name = context_generator_main(
             ctx=ctx,

@@ -315,9 +326,9 @@ def example(ctx: click.Context, prompt_file: str, code_file: str, output: Option
             output=output,
         )
         return example_code, total_cost, model_name
-    except Exception as e:
-        handle_error(e, command_name, quiet)
-        return None
+    except Exception as exception:
+        handle_error(exception, "example", ctx.obj.get("quiet", False))
+        return None
 
 
 @cli.command("test")

@@ -329,7 +340,12 @@ def example(ctx: click.Context, prompt_file: str, code_file: str, output: Option
     default=None,
     help="Specify where to save the generated test file (file or directory).",
 )
-@click.option(…
+@click.option(
+    "--language",
+    type=str,
+    default=None,
+    help="Specify the programming language."
+)
 @click.option(
     "--coverage-report",
     type=click.Path(exists=True, dir_okay=False),

@@ -345,7 +361,7 @@ def example(ctx: click.Context, prompt_file: str, code_file: str, output: Option
 @click.option(
     "--target-coverage",
     type=click.FloatRange(0.0, 100.0),
-    default=None,
+    default=None,  # Use None, default handled in cmd_test_main or env var
     help="Desired code coverage percentage (default: 90.0 or PDD_TEST_COVERAGE_TARGET).",
 )
 @click.option(

@@ -366,12 +382,10 @@ def test(
     existing_tests: Optional[str],
     target_coverage: Optional[float],
     merge: bool,
-) -> Optional[Tuple[str, float, str]]:
-    """Generate…
-    quiet = ctx.obj.get("quiet", False)
-    command_name = "test"
+) -> Optional[Tuple[str, float, str]]:
+    """Generate unit tests for a given prompt and implementation."""
     try:
-        … (1 line truncated in the diff view)
+        test_code, total_cost, model_name = cmd_test_main(
             ctx=ctx,
             prompt_file=prompt_file,
             code_file=code_file,

@@ -382,10 +396,10 @@ def test(
             target_coverage=target_coverage,
             merge=merge,
         )
-        return …
-    except Exception as e:
-        handle_error(e, command_name, quiet)
-        return None
+        return test_code, total_cost, model_name
+    except Exception as exception:
+        handle_error(exception, "test", ctx.obj.get("quiet", False))
+        return None
 
 
 @cli.command("preprocess")

@@ -430,12 +444,12 @@ def preprocess(
     recursive: bool,
     double: bool,
     exclude: Optional[Tuple[str, ...]],
-) -> Optional[Tuple[str, float, str]]:
-    """Preprocess prompt…
-    quiet = ctx.obj.get("quiet", False)
-    command_name = "preprocess"
+) -> Optional[Tuple[str, float, str]]:
+    """Preprocess a prompt file to prepare it for LLM use."""
     try:
-        … (1 line truncated in the diff view)
+        # Since preprocess is a local operation, we don't track cost
+        # But we need to return a tuple in the expected format for result callback
+        result = preprocess_main(
             ctx=ctx,
             prompt_file=prompt_file,
             output=output,

@@ -444,18 +458,25 @@ def preprocess(
             double=double,
             exclude=list(exclude) if exclude else [],
         )
-        … (5 lines truncated in the diff view)
+
+        # Handle the result from preprocess_main
+        if result is None:
+            # If preprocess_main returns None, still return a dummy tuple for the callback
+            return "", 0.0, "local"
+        else:
+            # Unpack the return value from preprocess_main
+            processed_prompt, total_cost, model_name = result
+            return processed_prompt, total_cost, model_name
+    except Exception as exception:
+        handle_error(exception, "preprocess", ctx.obj.get("quiet", False))
+        return None
 
 
 @cli.command("fix")
 @click.argument("prompt_file", type=click.Path(exists=True, dir_okay=False))
 @click.argument("code_file", type=click.Path(exists=True, dir_okay=False))
 @click.argument("unit_test_file", type=click.Path(exists=True, dir_okay=False))
-@click.argument("error_file", type=click.Path(dir_okay=False))
+@click.argument("error_file", type=click.Path(dir_okay=False))  # Allow non-existent for loop mode
 @click.option(
     "--output-test",
     type=click.Path(writable=True),

@@ -474,7 +495,12 @@ def preprocess(
     default=None,
     help="Specify where to save the results log (file or directory).",
 )
-@click.option(…
+@click.option(
+    "--loop",
+    is_flag=True,
+    default=False,
+    help="Enable iterative fixing process."
+)
 @click.option(
     "--verification-program",
     type=click.Path(exists=True, dir_okay=False),

@@ -502,7 +528,7 @@ def preprocess(
     help="Automatically submit the example if all unit tests pass.",
 )
 @click.pass_context
-@track_cost
+@track_cost
 def fix(
     ctx: click.Context,
     prompt_file: str,

@@ -517,15 +543,11 @@ def fix(
     max_attempts: int,
     budget: float,
     auto_submit: bool,
-) -> Optional[Tuple[Dict[str, Any], float, str]]:
-    """Fix…
-    quiet = ctx.obj.get("quiet", False)
-    command_name = "fix"
+) -> Optional[Tuple[Dict[str, Any], float, str]]:
+    """Fix code based on a prompt and unit test errors."""
     try:
-        … (2 lines truncated in the diff view)
-        # We need to adapt this to the (result, cost, model) structure for the callback
-        success, fixed_test, fixed_code, attempts, cost, model = fix_main(
+        # The actual logic is in fix_main
+        success, fixed_unit_test, fixed_code, attempts, total_cost, model_name = fix_main(
             ctx=ctx,
             prompt_file=prompt_file,
             code_file=code_file,

@@ -540,18 +562,16 @@ def fix(
             budget=budget,
             auto_submit=auto_submit,
         )
-        … (1 line truncated in the diff view)
-        result_data = {
+        result = {
             "success": success,
+            "fixed_unit_test": fixed_unit_test,
+            "fixed_code": fixed_code,
             "attempts": attempts,
-            "fixed_test_path": output_test,
-            "fixed_code_path": output_code,
-            "results_log_path": output_results,
         }
-        return result_data, cost, model
-    except Exception as e:
-        handle_error(e, command_name, quiet)
-        return None
+        return result, total_cost, model_name
+    except Exception as exception:
+        handle_error(exception, "fix", ctx.obj.get("quiet", False))
+        return None
 
 
 @cli.command("split")

@@ -676,7 +696,7 @@ def change(
     "--output",
     type=click.Path(writable=True),
     default=None,
-    help="Specify where to save the updated prompt file…
+    help="Specify where to save the updated prompt file. If not specified, overwrites the original prompt file to maintain it as the source of truth.",
 )
 @click.option(
     "--git",

@@ -851,8 +871,8 @@ def crash(
         result_data = {
             "success": success,
             "attempts": attempts,
-            "…
-            "…
+            "fixed_code": fixed_code,
+            "fixed_program": fixed_program,
         }
         return result_data, cost, model
     except Exception as e:

@@ -1079,6 +1099,83 @@ def verify(
         return None  # Return None on failure
 
 
+@cli.command("sync")
+@click.argument("basename", type=str)
+@click.option(
+    "--max-attempts",
+    type=int,
+    default=3,
+    show_default=True,
+    help="Maximum number of sync attempts.",
+)
+@click.option(
+    "--budget",
+    type=float,
+    default=10.0,
+    show_default=True,
+    help="Maximum total cost allowed for the entire sync process.",
+)
+@click.option(
+    "--skip-verify",
+    is_flag=True,
+    default=False,
+    help="Skip verification step during sync.",
+)
+@click.option(
+    "--skip-tests",
+    is_flag=True,
+    default=False,
+    help="Skip test generation during sync.",
+)
+@click.option(
+    "--target-coverage",
+    type=click.FloatRange(0.0, 100.0),
+    default=90.0,
+    show_default=True,
+    help="Target code coverage percentage for generated tests.",
+)
+@click.option(
+    "--log",
+    is_flag=True,
+    default=False,
+    help="Enable detailed logging during sync.",
+)
+@click.pass_context
+@track_cost
+def sync(
+    ctx: click.Context,
+    basename: str,
+    max_attempts: int,
+    budget: float,
+    skip_verify: bool,
+    skip_tests: bool,
+    target_coverage: float,
+    log: bool,
+) -> Optional[Tuple[Dict[str, Any], float, str]]:
+    """Automatically execute the complete PDD workflow loop for a given basename.
+
+    This command implements the entire synchronized cycle, intelligently determining
+    what steps are needed and executing them in the correct order. It detects
+    programming languages by scanning for prompt files matching the pattern
+    {basename}_{language}.prompt in the prompts directory.
+    """
+    try:
+        results, total_cost, model = sync_main(
+            ctx=ctx,
+            basename=basename,
+            max_attempts=max_attempts,
+            budget=budget,
+            skip_verify=skip_verify,
+            skip_tests=skip_tests,
+            target_coverage=target_coverage,
+            log=log,
+        )
+        return results, total_cost, model
+    except Exception as exception:
+        handle_error(exception, "sync", ctx.obj.get("quiet", False))
+        return None
+
+
 @cli.command("install_completion")
 @click.pass_context
 # No @track_cost
pdd/cmd_test_main.py
CHANGED

@@ -1,24 +1,28 @@
-… (1 line truncated in the diff view)
+"""
+Main entry point for the 'test' command.
+"""
+from __future__ import annotations
 import click
+# pylint: disable=redefined-builtin
 from rich import print
-from rich.progress import track
-import os
 
 from .construct_paths import construct_paths
 from .generate_test import generate_test
 from .increase_tests import increase_tests
 
+
+# pylint: disable=too-many-arguments, too-many-locals, too-many-return-statements, too-many-branches, too-many-statements, broad-except
 def cmd_test_main(
     ctx: click.Context,
     prompt_file: str,
     code_file: str,
-    output: …
-    language: …
-    coverage_report: …
-    existing_tests: …
-    target_coverage: …
-    merge: …
-) -> …
+    output: str | None,
+    language: str | None,
+    coverage_report: str | None,
+    existing_tests: str | None,
+    target_coverage: float | None,
+    merge: bool | None,
+) -> tuple[str, float, str]:
     """
     CLI wrapper for generating or enhancing unit tests.

@@ -29,15 +33,15 @@ def cmd_test_main(
         ctx (click.Context): The Click context object.
         prompt_file (str): Path to the prompt file.
         code_file (str): Path to the code file.
-        output (…
-        language (…
-        coverage_report (…
-        existing_tests (…
-        target_coverage (…
-        merge (…
+        output (str | None): Path to save the generated test file.
+        language (str | None): Programming language.
+        coverage_report (str | None): Path to the coverage report file.
+        existing_tests (str | None): Path to the existing unit test file.
+        target_coverage (float | None): Desired code coverage percentage.
+        merge (bool | None): Whether to merge new tests with existing tests.
 
     Returns:
-        … (1 line truncated in the diff view)
+        tuple[str, float, str]: Generated unit test code, total cost, and model name.
     """
     # Initialize variables
     unit_test = ""

@@ -77,15 +81,18 @@ def cmd_test_main(
             "target_coverage": target_coverage,
         }
 
-        input_strings, output_file_paths, language = construct_paths(
+        resolved_config, input_strings, output_file_paths, language = construct_paths(
             input_file_paths=input_file_paths,
             force=ctx.obj["force"],
             quiet=ctx.obj["quiet"],
            command="test",
            command_options=command_options,
        )
-    except Exception as e:
-        … (1 line truncated in the diff view)
+    except Exception as exception:
+        # Catching a general exception is necessary here to handle a wide range of
+        # potential errors during file I/O and path construction, ensuring the
+        # CLI remains robust.
+        print(f"[bold red]Error constructing paths: {exception}[/bold red]")
         ctx.exit(1)
         return "", 0.0, ""

@@ -102,16 +109,20 @@ def cmd_test_main(
                 temperature=temperature,
                 time=time,
                 language=language,
-                verbose=verbose
+                verbose=verbose,
             )
-    except Exception as e:
-        … (1 line truncated in the diff view)
+        except Exception as exception:
+            # A general exception is caught to handle various errors that can occur
+            # during the test generation process, which involves external model
+            # interactions and complex logic.
+            print(f"[bold red]Error generating tests: {exception}[/bold red]")
             ctx.exit(1)
             return "", 0.0, ""
     else:
         if not existing_tests:
             print(
-                "[bold red]Error: --existing-tests is required…
+                "[bold red]Error: --existing-tests is required "
+                "when using --coverage-report[/bold red]"
             )
             ctx.exit(1)
             return "", 0.0, ""

@@ -127,13 +138,16 @@ def cmd_test_main(
                 time=time,
                 verbose=verbose,
             )
-    except Exception as e:
-        … (1 line truncated in the diff view)
+        except Exception as exception:
+            # This broad exception is used to catch any issue that might arise
+            # while increasing test coverage, including problems with parsing
+            # reports or interacting with the language model.
+            print(f"[bold red]Error increasing test coverage: {exception}[/bold red]")
             ctx.exit(1)
             return "", 0.0, ""
 
-    # Handle output
-    output_file = output_file_paths["output"]
+    # Handle output - prioritize orchestration output path over construct_paths result
+    output_file = output or output_file_paths["output"]
     if merge and existing_tests:
         output_file = existing_tests

@@ -142,13 +156,14 @@ def cmd_test_main(
         ctx.exit(1)
         return "", 0.0, ""
     try:
-        with open(output_file, "w") as …
-            … (1 line truncated in the diff view)
-        print(…
-        … (4 lines truncated in the diff view)
+        with open(output_file, "w", encoding="utf-8") as file_handle:
+            file_handle.write(unit_test)
+        print(f"[bold green]Unit tests saved to:[/bold green] {output_file}")
+    except Exception as exception:
+        # A broad exception is caught here to handle potential file system errors
+        # (e.g., permissions, disk space) that can occur when writing the
+        # output file, preventing the program from crashing unexpectedly.
+        print(f"[bold red]Error saving tests to file: {exception}[/bold red]")
         ctx.exit(1)
         return "", 0.0, ""

@@ -156,4 +171,4 @@ def cmd_test_main(
     print(f"[bold blue]Total cost:[/bold blue] ${total_cost:.6f}")
     print(f"[bold blue]Model used:[/bold blue] {model_name}")
 
-    return unit_test, total_cost, model_name
+    return unit_test, total_cost, model_name
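Both cmd_test_main above and code_generator_main below now resolve the output path with the same truthiness fallback: an explicitly passed output wins, otherwise the path computed by construct_paths is used. A small self-contained illustration of the pattern (generic names, not PDD's), including the caveat that an empty string is falsy and also falls through to the computed default:

    # Illustration of the "caller-supplied value or computed default" fallback
    # used above; names are generic, not taken from PDD.
    def resolve_output(explicit: str | None, computed: str) -> str:
        # `or` picks `computed` when `explicit` is None *or* any other falsy
        # value, including "" - acceptable here, since an empty path is never
        # a usable output target.
        return explicit or computed

    assert resolve_output(None, "tests/test_calc.py") == "tests/test_calc.py"
    assert resolve_output("out/my_tests.py", "tests/test_calc.py") == "out/my_tests.py"
    assert resolve_output("", "tests/test_calc.py") == "tests/test_calc.py"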
pdd/code_generator_main.py
CHANGED

@@ -157,7 +157,7 @@ def code_generator_main(
     command_options: Dict[str, Any] = {"output": output}
 
     try:
-        input_strings, output_file_paths, language = construct_paths(
+        resolved_config, input_strings, output_file_paths, language = construct_paths(
             input_file_paths=input_file_paths_dict,
             force=force_overwrite,
             quiet=quiet,

@@ -165,7 +165,8 @@ def code_generator_main(
             command_options=command_options,
         )
         prompt_content = input_strings["prompt_file"]
-        output_path = output_file_paths.get("output")
+        # Prioritize orchestration output path over construct_paths result
+        output_path = output or output_file_paths.get("output")
 
     except FileNotFoundError as e:
         console.print(f"[red]Error: Input file not found: {e.filename}[/red]")
pdd/conflicts_main.py
CHANGED

@@ -28,7 +28,7 @@ def conflicts_main(ctx: click.Context, prompt1: str, prompt2: str, output: Optio
     command_options = {
         "output": output
     }
-    input_strings, output_file_paths, _ = construct_paths(
+    resolved_config, input_strings, output_file_paths, _ = construct_paths(
         input_file_paths=input_file_paths,
         force=ctx.obj.get('force', False),
         quiet=ctx.obj.get('quiet', False),